diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 00000000000..4a26e5f0233
Binary files /dev/null and b/.DS_Store differ
diff --git a/.gitignore b/.gitignore
index 4e6417a74ef..1bc0ae472a4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,14 +8,10 @@
*.etcd
*.log
*.swp
-/etcd
/hack/insta-discovery/.env
*.coverprofile
*.test
hack/tls-setup/certs
-.idea
-*.iml
-/contrib/mixin/manifests
/contrib/raftexample/raftexample
/contrib/raftexample/raftexample-*
/vendor
@@ -23,15 +19,5 @@ hack/tls-setup/certs
*.tmp
*.bak
.gobincache/
-.DS_Store
-/Documentation/dev-guide/api_reference_v3.md
-/Documentation/dev-guide/api_concurrency_reference_v3.md
-
-/tools/etcd-dump-db/etcd-dump-db
-/tools/etcd-dump-logs/etcd-dump-logs
-/tools/etcd-dump-metrics/etcd-dump-metrics
-/tools/local-tester/bridge/bridge
-/tools/proto-annotations/proto-annotations
-/tools/benchmark/benchmark
-/out
-/etcd-dump-logs
+default.etcd
+raftexample/db/*
diff --git a/.golangci.yaml b/.golangci.yaml
deleted file mode 100644
index d169aa4e7fa..00000000000
--- a/.golangci.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-run:
- timeout: 30m
- skip-files:
- - "^zz_generated.*"
-
-issues:
- max-same-issues: 0
- # Excluding configuration per-path, per-linter, per-text and per-source
- exclude-rules:
-    # exclude the ineffassign linter for generated conversion files
- - path: conversion\.go
- linters:
- - ineffassign
-
-linters:
- disable-all: true
- enable: # please keep this alphabetized
-    # Don't use soon-to-be-deprecated[1] linters that lead to false positives
- # https://github.com/golangci/golangci-lint/issues/1841
- # - deadcode
- # - structcheck
- # - varcheck
- - goimports
- - ineffassign
- - revive
- - staticcheck
- - stylecheck
- - unused
- - unconvert # Remove unnecessary type conversions
-
-linters-settings: # please keep this alphabetized
- goimports:
- local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages.
- staticcheck:
- checks:
- - "all"
- - "-SA1019" # TODO(fix) Using a deprecated function, variable, constant or field
- - "-SA2002" # TODO(fix) Called testing.T.FailNow or SkipNow in a goroutine, which isn’t allowed
- stylecheck:
- checks:
- - "ST1019" # Importing the same package multiple times.
diff --git a/.header b/.header
deleted file mode 100644
index 0446af6d877..00000000000
--- a/.header
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
diff --git a/ADOPTERS.md b/ADOPTERS.md
deleted file mode 100644
index c6c294637d3..00000000000
--- a/ADOPTERS.md
+++ /dev/null
@@ -1,250 +0,0 @@
----
-title: Production users
----
-
-This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check in on how etcd is working in the field and update this list.
-
-## All Kubernetes Users
-
-- *Application*: https://kubernetes.io/
-- *Environments*: AWS, OpenStack, Azure, Google Cloud, Huawei Cloud, Bare Metal, etc
-
-**This is a meta user; please feel free to document specific Kubernetes clusters!**
-
-All Kubernetes clusters use etcd as their primary data store. This means etcd's users include such companies as [Niantic, Inc Pokemon Go](https://cloudplatform.googleblog.com/2016/09/bringing-Pokemon-GO-to-life-on-Google-Cloud.html), [Box](https://blog.box.com/blog/kubernetes-box-microservices-maximum-velocity/), [CoreOS](https://coreos.com/tectonic), [Ticketmaster](https://www.youtube.com/watch?v=wqXVKneP0Hg), [Salesforce](https://www.salesforce.com) and many, many more.
-
-## discovery.etcd.io
-
-- *Application*: https://github.com/coreos/discovery.etcd.io
-- *Launched*: Feb. 2014
-- *Cluster Size*: 5 members, 5 discovery proxies
-- *Order of Data Size*: 100s of Megabytes
-- *Operator*: CoreOS, brandon.philips@coreos.com
-- *Environment*: AWS
-- *Backups*: Periodic async to S3
-
-discovery.etcd.io is the longest continuously running etcd backed service that we know about. It is the basis of automatic cluster bootstrap and was launched in Feb. 2014: https://coreos.com/blog/etcd-0.3.0-released/.
-
-## OpenTable
-
-- *Application*: OpenTable internal service discovery and cluster configuration management
-- *Launched*: May 2014
-- *Cluster Size*: 3 members each in 6 independent clusters; approximately 50 nodes reading / writing
-- *Order of Data Size*: 10s of MB
-- *Operator*: OpenTable, Inc; sschlansker@opentable.com
-- *Environment*: AWS, VMWare
-- *Backups*: None, all data can be re-created if necessary.
-
-## cycoresys.com
-
-- *Application*: multiple
-- *Launched*: Jul. 2014
-- *Cluster Size*: 3 members, _n_ proxies
-- *Order of Data Size*: 100s of kilobytes
-- *Operator*: CyCore Systems, Inc, sys@cycoresys.com
-- *Environment*: Baremetal
-- *Backups*: Periodic sync to Ceph RadosGW and DigitalOcean VM
-
-CyCore Systems provides architecture and engineering for computing systems. This cluster provides microservices, virtual machines, databases, and storage clusters to a number of clients. It is built on CoreOS machines, with each machine in the cluster running etcd as a peer or proxy.
-
-## Radius Intelligence
-
-- *Application*: multiple internal tools, Kubernetes clusters, bootstrappable system configs
-- *Launched*: June 2015
-- *Cluster Size*: 2 clusters of 5 and 3 members; approximately a dozen nodes read/write
-- *Order of Data Size*: 100s of kilobytes
-- *Operator*: Radius Intelligence; jcderr@radius.com
-- *Environment*: AWS, CoreOS, Kubernetes
-- *Backups*: None, all data can be recreated if necessary.
-
-Radius Intelligence uses Kubernetes running CoreOS to containerize and scale internal toolsets. Examples include running [JetBrains TeamCity][teamcity] and internal AWS security and cost reporting tools. etcd clusters back these clusters as well as provide some basic environment bootstrapping configuration keys.
-
-## Vonage
-
-- *Application*: kubernetes, vault backend, system configuration for microservices, scheduling, locks (future - service discovery)
-- *Launched*: August 2015
-- *Cluster Size*: 2 clusters of 5 members in 2 DCs, n local proxies 1-to-1 with microservice, (ssl and SRV look up)
-- *Order of Data Size*: kilobytes
-- *Operator*: Vonage [devAdmin][raoofm]
-- *Environment*: VMWare, AWS
-- *Backups*: Daily snapshots on VMs. Backups done for upgrades.
-
-## PD
-
-- *Application*: embed etcd
-- *Launched*: Mar 2016
-- *Cluster Size*: 3 or 5 members
-- *Order of Data Size*: megabytes
-- *Operator*: PingCAP, Inc.
-- *Environment*: Bare Metal, AWS, etc.
-- *Backups*: None.
-
-PD (Placement Driver) is the central controller in the TiDB cluster. It stores the cluster meta information, schedules data, allocates globally unique timestamps for distributed transactions, etc. It embeds etcd to provide high availability and automatic failover.
-
-## Huawei
-
-- *Application*: System configuration for overlay network (Canal)
-- *Launched*: June 2016
-- *Cluster Size*: 3 members for each cluster
-- *Order of Data Size*: kilobytes
-- *Operator*: Huawei Euler Department
-- *Environment*: [Huawei Cloud](http://www.hwclouds.com/product/cce.html)
-- *Backups*: None, all data can be recreated if necessary.
-
-[teamcity]: https://www.jetbrains.com/teamcity/
-[raoofm]:https://github.com/raoofm
-
-## Qiniu Cloud
-
-- *Application*: system configuration for microservices, distributed locks
-- *Launched*: Jan. 2016
-- *Cluster Size*: 3 members each with several clusters
-- *Order of Data Size*: kilobytes
-- *Operator*: Pandora, chenchao@qiniu.com
-- *Environment*: Baremetal
-- *Backups*: None, all data can be recreated if necessary
-
-## QingCloud
-
-- *Application*: [QingCloud][qingcloud] appcenter cluster for service discovery as [metad][metad] backend.
-- *Launched*: December 2016
-- *Cluster Size*: 1 cluster of 3 members per user.
-- *Order of Data Size*: kilobytes
-- *Operator*: [yunify][yunify]
-- *Environment*: QingCloud IaaS
-- *Backups*: None, all data can be recreated if necessary.
-
-[metad]:https://github.com/yunify/metad
-[yunify]:https://github.com/yunify
-[qingcloud]:https://qingcloud.com/
-
-
-## Yandex
-
-- *Application*: system configuration for services, service discovery
-- *Launched*: March 2016
-- *Cluster Size*: 3 clusters of 5 members
-- *Order of Data Size*: several gigabytes
-- *Operator*: Yandex; [nekto0n][nekto0n]
-- *Environment*: Bare Metal
-- *Backups*: None
-
-[nekto0n]:https://github.com/nekto0n
-
-## Tencent Games
-
-- *Application*: Meta data and configuration data for service discovery, Kubernetes, etc.
-- *Launched*: Jan. 2015
-- *Cluster Size*: 3 members each with 10s of clusters
-- *Order of Data Size*: 10s of Megabytes
-- *Operator*: Tencent Game Operations Department
-- *Environment*: Baremetal
-- *Backups*: Periodic sync to backup server
-
-In Tencent Games, we use Docker and Kubernetes to deploy and run our applications, and use etcd to store metadata for service discovery, Kubernetes, etc.
-
-## Hyper.sh
-
-- *Application*: Kubernetes, distributed locks, etc.
-- *Launched*: April 2016
-- *Cluster Size*: 1 cluster of 3 members
-- *Order of Data Size*: 10s of MB
-- *Operator*: Hyper.sh
-- *Environment*: Baremetal
-- *Backups*: None, all data can be recreated if necessary.
-
-In [hyper.sh][hyper.sh], the container service is backed by [hypernetes][hypernetes], a multi-tenant Kubernetes distro. Moreover, we use etcd to coordinate the multiple management services and store global metadata.
-
-[hypernetes]:https://github.com/hyperhq/hypernetes
-[Hyper.sh]:https://www.hyper.sh
-
-## Meitu
-- *Application*: system configuration for services, service discovery, kubernetes in test environment
-- *Launched*: October 2015
-- *Cluster Size*: 1 cluster of 3 members
-- *Order of Data Size*: megabytes
-- *Operator*: Meitu, hxj@meitu.com, [shafreeck][shafreeck]
-- *Environment*: Bare Metal
-- *Backups*: None, all data can be recreated if necessary.
-
-[shafreeck]:https://github.com/shafreeck
-
-## Grab
-- *Application*: system configuration for services, service discovery
-- *Launched*: June 2016
-- *Cluster Size*: 1 cluster of 7 members
-- *Order of Data Size*: megabytes
-- *Operator*: Grab, [taxitan][taxitan], [reterVision][reterVision]
-- *Environment*: AWS
-- *Backups*: None, all data can be recreated if necessary.
-
-[taxitan]:https://github.com/taxitan
-[reterVision]:https://github.com/reterVision
-
-## DaoCloud.io
-
-- *Application*: container management
-- *Launched*: Sep. 2015
-- *Cluster Size*: 1000+ deployments, each deployment contains a 3 node cluster.
-- *Order of Data Size*: 100s of Megabytes
-- *Operator*: daocloud.io
-- *Environment*: Baremetal and virtual machines
-- *Backups*: None, all data can be recreated if necessary.
-
-In [DaoCloud][DaoCloud], we use Docker and Swarm to deploy and run our applications, and we use etcd to save metadata for service discovery.
-
-[DaoCloud]:https://www.daocloud.io
-
-## Branch.io
-
-- *Application*: Kubernetes
-- *Launched*: April 2016
-- *Cluster Size*: Multiple clusters, multiple sizes
-- *Order of Data Size*: 100s of Megabytes
-- *Operator*: branch.io
-- *Environment*: AWS, Kubernetes
-- *Backups*: EBS volume backups
-
-At [Branch][branch], we use kubernetes heavily as our core microservice platform for staging and production.
-
-[branch]: https://branch.io
-
-## Baidu Waimai
-
-- *Application*: SkyDNS, Kubernetes, UDC, CMDB and other distributed systems
-- *Launched*: April 2016
-- *Cluster Size*: 3 clusters of 5 members
-- *Order of Data Size*: several gigabytes
-- *Operator*: Baidu Waimai Operations Department
-- *Environment*: CentOS 6.5
-- *Backups*: backup scripts
-
-## Salesforce.com
-
-- *Application*: Kubernetes
-- *Launched*: Jan 2017
-- *Cluster Size*: Multiple clusters of 3 members
-- *Order of Data Size*: 100s of Megabytes
-- *Operator*: Salesforce.com (krmayankk@github)
-- *Environment*: BareMetal
-- *Backups*: None, all data can be recreated
-
-## Hosted Graphite
-
-- *Application*: Service discovery, locking, ephemeral application data
-- *Launched*: January 2017
-- *Cluster Size*: 2 clusters of 7 members
-- *Order of Data Size*: Megabytes
-- *Operator*: Hosted Graphite (sre@hostedgraphite.com)
-- *Environment*: Bare Metal
-- *Backups*: None, all data is considered ephemeral.
-
-## Transwarp
-
-- *Application*: Transwarp Data Cloud, Transwarp Operating System, Transwarp Data Hub, Sophon
-- *Launched*: January 2016
-- *Cluster Size*: Multiple clusters, multiple sizes
-- *Order of Data Size*: Megabytes
-- *Operator*: Transwarp Operating System
-- *Environment*: Bare Metal, Container
-- *Backups*: backup scripts
diff --git a/CHANGELOG/CHANGELOG-2.3.md b/CHANGELOG/CHANGELOG-2.3.md
deleted file mode 100644
index 0b54062b1d8..00000000000
--- a/CHANGELOG/CHANGELOG-2.3.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-
-
-
-## [v2.3.8](https://github.com/etcd-io/etcd/releases/tag/v2.3.8) (2017-02-17)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v2.3.7...v2.3.8).
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
diff --git a/CHANGELOG/CHANGELOG-3.0.md b/CHANGELOG/CHANGELOG-3.0.md
deleted file mode 100644
index bc11c80a5f0..00000000000
--- a/CHANGELOG/CHANGELOG-3.0.md
+++ /dev/null
@@ -1,291 +0,0 @@
-
-
-
-
-
-## [v3.0.16](https://github.com/etcd-io/etcd/releases/tag/v3.0.16) (2016-11-13)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.15...v3.0.16) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Go
-
-- Compile with [*Go 1.6.4*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.15](https://github.com/etcd-io/etcd/releases/tag/v3.0.15) (2016-11-11)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.14...v3.0.15) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Fixed
-
-- Fix cancel watch request with wrong range end.
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.14](https://github.com/etcd-io/etcd/releases/tag/v3.0.14) (2016-11-04)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.13...v3.0.14) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Added
-
-- v3 `etcdctl migrate` command now supports `--no-ttl` flag to discard keys on transform.
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.13](https://github.com/etcd-io/etcd/releases/tag/v3.0.13) (2016-10-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.12...v3.0.13) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.12](https://github.com/etcd-io/etcd/releases/tag/v3.0.12) (2016-10-07)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.11...v3.0.12) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.11](https://github.com/etcd-io/etcd/releases/tag/v3.0.11) (2016-10-07)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.10...v3.0.11) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Added
-
-- Server returns previous key-value (optional)
- - `clientv3.WithPrevKV` option
- - v3 etcdctl `put,watch,del --prev-kv` flag
-
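For the `clientv3.WithPrevKV` option noted in the v3.0.11 entry above, here is a minimal sketch of its use from the Go client (not part of this diff; the endpoint address and key are illustrative assumptions, and the import path is the one used by current etcd client releases):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithPrevKV asks the server to return the key-value pair that this Put
	// overwrote, mirroring `etcdctl put --prev-kv`.
	resp, err := cli.Put(context.TODO(), "foo", "bar", clientv3.WithPrevKV())
	if err != nil {
		log.Fatal(err)
	}
	if resp.PrevKv != nil {
		fmt.Printf("previous value: %s\n", resp.PrevKv.Value)
	}
}
```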
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.10](https://github.com/etcd-io/etcd/releases/tag/v3.0.10) (2016-09-23)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.9...v3.0.10) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.9](https://github.com/etcd-io/etcd/releases/tag/v3.0.9) (2016-09-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.8...v3.0.9) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Added
-
-- Warn on domain names on listen URLs (v3.2 will reject domain names).
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.8](https://github.com/etcd-io/etcd/releases/tag/v3.0.8) (2016-09-09)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.7...v3.0.8) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Other
-
-- Allow only IP addresses in listen URLs (domain names are rejected).
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.7](https://github.com/etcd-io/etcd/releases/tag/v3.0.7) (2016-08-31)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.6...v3.0.7) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Other
-
-- SRV records only allow A records (RFC 2052).
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.6](https://github.com/etcd-io/etcd/releases/tag/v3.0.6) (2016-08-19)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.5...v3.0.6) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.5](https://github.com/etcd-io/etcd/releases/tag/v3.0.5) (2016-08-19)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.4...v3.0.5) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Other
-
-- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given.
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.4](https://github.com/etcd-io/etcd/releases/tag/v3.0.4) (2016-07-27)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.3...v3.0.4) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Added
-
-- v2 `etcdctl ls` command now supports `--output=json`.
-- Add /var/lib/etcd directory to etcd official Docker image.
-
-### Other
-
-- v2 auth can now use common name from TLS certificate when `--client-cert-auth` is enabled.
-
-### Go
-
-- Compile with [*Go 1.6.3*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.3](https://github.com/etcd-io/etcd/releases/tag/v3.0.3) (2016-07-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.2...v3.0.3) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Other
-
-- Revert Dockerfile to use `CMD`, instead of `ENTRYPOINT`, to support `etcdctl` run.
- - Docker commands for v3.0.2 won't work without specifying executable binary paths.
-- v3 etcdctl default endpoints are now `127.0.0.1:2379`.
-
-### Go
-
-- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.2](https://github.com/etcd-io/etcd/releases/tag/v3.0.2) (2016-07-08)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.1...v3.0.2) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Other
-
-- Dockerfile uses `ENTRYPOINT`, instead of `CMD`, to run etcd without binary path specified.
-
-### Go
-
-- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.1](https://github.com/etcd-io/etcd/releases/tag/v3.0.1) (2016-07-01)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.0...v3.0.1) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Go
-
-- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
-
-## [v3.0.0](https://github.com/etcd-io/etcd/releases/tag/v3.0.0) (2016-06-30)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v2.3.0...v3.0.0) and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_0/).**
-
-### Go
-
-- Compile with [*Go 1.6.2*](https://golang.org/doc/devel/release.html#go1.6).
-
-
-
-
diff --git a/CHANGELOG/CHANGELOG-3.1.md b/CHANGELOG/CHANGELOG-3.1.md
deleted file mode 100644
index 0c97517a7e2..00000000000
--- a/CHANGELOG/CHANGELOG-3.1.md
+++ /dev/null
@@ -1,574 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.0](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.0.md).
-
-
-
-## [v3.1.21](https://github.com/etcd-io/etcd/releases/tag/v3.1.21) (2019-TBD)
-
-### etcdctl v3
-
-- [Strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443) with etcdctl v2
-- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540).
- - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532).
- - The command output is changed. Previously, if endpoint is unreachable, the command output is
-  "\<endpoint\> is unhealthy: failed to connect: \<error\>". This change unified the error message, all error types
-  now have the same output "\<endpoint\> is unhealthy: failed to commit proposal: \<error\>".
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646).
-
-
-
-## [v3.1.20](https://github.com/etcd-io/etcd/releases/tag/v3.1.20) (2018-10-10)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.19...v3.1.20) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Improved
-
-- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates that a message send to a peer failed.
-- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that the local node might have a slow network.
-- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information.
-- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats.
- - Previously, it only samples the TCP connection for snapshot messages.
-- Display all registered [gRPC metrics at start](https://github.com/etcd-io/etcd/pull/10034).
-- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric.
-- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric.
-
-### client v3
-
-- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.19](https://github.com/etcd-io/etcd/releases/tag/v3.1.19) (2018-07-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.18...v3.1.19) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Improved
-
-- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric.
-- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric.
-- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-- Add [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) Prometheus metric.
- - In addition to [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819).
-- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-
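To make the relationship spelled out in the metric bullets above concrete, here is a tiny illustration (not part of this diff) using the sample values quoted in the changelog; the numbers are the changelog's examples, not measurements:

```go
package main

import "fmt"

func main() {
	// Sample values from the changelog entry above.
	const (
		quotaBackendBytes = int64(2147483648) // etcd_server_quota_backend_bytes: 2 GB quota
		dbTotalSizeBytes  = int64(20480)      // etcd_mvcc_db_total_size_in_bytes: physically allocated DB size
		dbInUseBytes      = int64(16384)      // etcd_mvcc_db_total_size_in_use_in_bytes: size after a defragment
	)

	// Bytes that a defragment operation could reclaim on disk.
	fmt.Println("reclaimable bytes:", dbTotalSizeBytes-dbInUseBytes) // 4096
	fmt.Println("quota headroom bytes:", quotaBackendBytes-dbTotalSizeBytes)
}
```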
-### client v3
-
-- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952).
-  - If `<-chan *clientv3.LeaseKeepAliveResponse` from `clientv3.Lease.KeepAlive` was never consumed or the channel is full, the client was [sending keepalive requests every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of the expected rate of every "TTL / 3" duration.
-
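As a side note to the keepalive fix above, the response channel returned by `KeepAlive` is meant to be drained; a minimal sketch follows (not part of this diff; the endpoint and TTL are illustrative assumptions):

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	lease, err := cli.Grant(context.TODO(), 30) // 30-second TTL, chosen for illustration
	if err != nil {
		log.Fatal(err)
	}

	ch, err := cli.KeepAlive(context.TODO(), lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	// Keep reading the channel; leaving it unread (or letting it fill up) is the
	// situation the 500ms-resend bug above was about.
	for resp := range ch {
		log.Printf("lease %x renewed, ttl=%d", resp.ID, resp.TTL)
	}
}
```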
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.18](https://github.com/etcd-io/etcd/releases/tag/v3.1.18) (2018-06-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.17...v3.1.18) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric.
- - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.17](https://github.com/etcd-io/etcd/releases/tag/v3.1.17) (2018-06-06)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.16...v3.1.17) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- Fix [v3 snapshot recovery](https://github.com/etcd-io/etcd/issues/7628).
- - A follower receives a leader snapshot to be persisted as a `[SNAPSHOT-INDEX].snap.db` file on disk.
- - Now, server [ensures that the incoming snapshot be persisted on disk before loading it](https://github.com/etcd-io/etcd/pull/7876).
- - Otherwise, index mismatch happens and triggers server-side panic (e.g. newer WAL entry with outdated snapshot index).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.16](https://github.com/etcd-io/etcd/releases/tag/v3.1.16) (2018-05-31)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.15...v3.1.16) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775).
- - Let's assume that a watcher had been requested with a future revision X and sent to node A that became network-partitioned thereafter. Meanwhile, cluster makes progress. Then when the partition gets removed, the leader sends a snapshot to node A. Previously if the snapshot's latest revision is still lower than the watch revision X, **etcd server panicked** during snapshot restore operation.
- - Now, this server-side panic has been fixed.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.15](https://github.com/etcd-io/etcd/releases/tag/v3.1.15) (2018-05-09)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.14...v3.1.15) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- Purge old [`*.snap.db` snapshot files](https://github.com/etcd-io/etcd/pull/7967).
- - Previously, etcd did not respect `--max-snapshots` flag to purge old `*.snap.db` files.
- - Now, etcd purges old `*.snap.db` files to keep maximum `--max-snapshots` number of files on disk.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.14](https://github.com/etcd-io/etcd/releases/tag/v3.1.14) (2018-04-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.13...v3.1.14) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric.
-
-### etcd server
-
-- Add [`--initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward.
-  - By default, `--initial-election-tick-advance=true`, so the local member fast-forwards election ticks to speed up the "initial" leader election trigger.
-  - This benefits the case of larger election ticks. For instance, a cross-datacenter deployment may require a longer election timeout of 10 seconds. If true, the local node does not need to wait up to 10 seconds. Instead, it fast-forwards its election ticks to the 8-second mark, leaving only 2 seconds before leader election.
-  - The major assumptions are that the cluster has no active leader, so advancing ticks enables faster leader election, or that the cluster already has an established leader and the rejoining follower is likely to receive heartbeats from the leader after the tick advance and before the election timeout.
-  - However, when the network from the leader to the rejoining follower is congested and the follower does not receive a leader heartbeat within the remaining election ticks, a disruptive election has to happen, affecting cluster availability.
- - Now, this can be disabled by setting `--initial-election-tick-advance=false`.
- - Disabling this would slow down initial bootstrap process for cross datacenter deployments. Make tradeoffs by configuring `--initial-election-tick-advance` at the cost of slow initial bootstrap.
- - If single-node, it advances ticks regardless.
- - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.13](https://github.com/etcd-io/etcd/releases/tag/v3.1.13) (2018-03-29)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.12...v3.1.13) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Improved
-
-- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333).
- - Previously, etcd fast-forwards election ticks on server start, with only one tick left for leader election. This is to speed up start phase, without having to wait until all election ticks elapse. Advancing election ticks is useful for cross datacenter deployments with larger election timeouts. However, it was affecting cluster availability if the last tick elapses before leader contacts the restarted node.
- - Now, when etcd restarts, it adjusts election ticks with more than one tick left, thus more time for leader to prevent disruptive restart.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add missing [`etcd_network_peer_sent_failures_total` count](https://github.com/etcd-io/etcd/pull/9437).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.12](https://github.com/etcd-io/etcd/releases/tag/v3.1.12) (2018-03-08)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.11...v3.1.12) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9297).
-  - An "unsynced" watcher is a watcher that still needs to catch up with events that have already happened.
-  - That is, an "unsynced" watcher is a slow watcher that was requested on an old revision.
-  - The "unsynced" watcher restore operation was not correctly populating its underlying watcher group.
-  - This possibly caused [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086).
-  - A node gets network-partitioned with a watcher on a future revision, falls behind, and receives a leader snapshot after the partition is removed. When applying this snapshot, etcd watch storage moves currently synced watchers to unsynced, since synced watchers might have become stale during the network partition, and resets the synced watcher group to restart the watcher routines. Previously, there was a bug when moving watchers from the synced group to the unsynced group, so a client would miss events when the watcher had been requested against the network-partitioned node.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.11](https://github.com/etcd-io/etcd/releases/tag/v3.1.11) (2017-11-28)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.10...v3.1.11) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- [#8411](https://github.com/etcd-io/etcd/issues/8411),[#8806](https://github.com/etcd-io/etcd/pull/8806) backport "mvcc: sending events after restore"
-- [#8009](https://github.com/etcd-io/etcd/issues/8009),[#8902](https://github.com/etcd-io/etcd/pull/8902) backport coreos/bbolt v1.3.1-coreos.5
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.1.10](https://github.com/etcd-io/etcd/releases/tag/v3.1.10) (2017-07-14)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.9...v3.1.10) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Added
-
-- Tag docker images with minor versions.
- - e.g. `docker pull quay.io/coreos/etcd:v3.1` to fetch latest v3.1 versions.
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
- - Fix panic on `net/http.CloseNotify`
-
-
-
-
-
-## [v3.1.9](https://github.com/etcd-io/etcd/releases/tag/v3.1.9) (2017-06-09)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.8...v3.1.9) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- Allow v2 snapshot over 512MB.
-
-### Go
-
-- Compile with [*Go 1.7.6*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.8](https://github.com/etcd-io/etcd/releases/tag/v3.1.8) (2017-05-19)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.7...v3.1.8) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.7](https://github.com/etcd-io/etcd/releases/tag/v3.1.7) (2017-04-28)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.6...v3.1.7) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.6](https://github.com/etcd-io/etcd/releases/tag/v3.1.6) (2017-04-19)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.5...v3.1.6) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- Fill in Auth API response header.
-- Remove auth check in Status API.
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.5](https://github.com/etcd-io/etcd/releases/tag/v3.1.5) (2017-03-27)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.4...v3.1.5) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd server
-
-- Fix raft memory leak issue.
-- Fix Windows file path issues.
-
-### Other
-
-- Add `/etc/nsswitch.conf` file to alpine-based Docker image.
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.4](https://github.com/etcd-io/etcd/releases/tag/v3.1.4) (2017-03-22)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.3...v3.1.4) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.3](https://github.com/etcd-io/etcd/releases/tag/v3.1.3) (2017-03-10)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.2...v3.1.3) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd gateway
-
-- Fix `etcd gateway` schema handling in DNS discovery.
-- Fix sd_notify behaviors in `gateway`, `grpc-proxy`.
-
-### gRPC Proxy
-
-- Fix sd_notify behaviors in `gateway`, `grpc-proxy`.
-
-### Other
-
-- Use the machine default host when advertise URLs are the default values (`localhost:2379,2380`) and the listen URL is `0.0.0.0`.
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.2](https://github.com/etcd-io/etcd/releases/tag/v3.1.2) (2017-02-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.1...v3.1.2) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### etcd gateway
-
-- Fix `etcd gateway` with multiple endpoints.
-
-### Other
-
-- Use IPv4 default host, by default (when IPv4 and IPv6 are available).
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.1](https://github.com/etcd-io/etcd/releases/tag/v3.1.1) (2017-02-17)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.0...v3.1.1) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Go
-
-- Compile with [*Go 1.7.5*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
-
-## [v3.1.0](https://github.com/etcd-io/etcd/releases/tag/v3.1.0) (2017-01-20)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.0.0...v3.1.0) and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.1 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_1/).**
-
-### Improved
-
-- Faster linearizable reads (implements Raft [read-index](https://github.com/etcd-io/etcd/pull/6212)).
-- v3 authentication API is now stable.
-
-### Breaking Changes
-
-- Deprecated following gRPC metrics in favor of [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus).
- - `etcd_grpc_requests_total`
- - `etcd_grpc_requests_failed_total`
- - `etcd_grpc_active_streams`
- - `etcd_grpc_unary_requests_duration_seconds`
-
-### Dependency
-
-- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) to [**`ugorji/go@9c7f9b7`**](https://github.com/ugorji/go/commit/9c7f9b7a2bc3a520f7c7b30b34b7f85f47fe27b6), and [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/6945).
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given.
- - `TLSConfig.ServerName` is ignored with user-provided certificates for backwards compatibility; to be deprecated.
- - For example, `etcd --discovery-srv=example.com` will only authenticate peers/clients when the provided certs have root domain `example.com` as an entry in Subject Alternative Name (SAN) field.
-
-### etcd server
-
-- Automatic leadership transfer when leader steps down.
-- etcd flags
- - `--strict-reconfig-check` flag is set by default.
- - Add `--log-output` flag.
- - Add `--metrics` flag.
-- etcd uses default route IP if advertise URL is not given.
-- Cluster rejects removing members if quorum will be lost.
-- Discovery now has upper limit for waiting on retries.
-- Warn on binding listeners through domain names; to be deprecated.
-- v3.0 and v3.1 with `--auto-compaction-retention=10` run periodic compaction on the v3 key-value store every 10 hours.
-  - Compactor only supports periodic compaction.
-  - Compactor records the latest revisions every 5 minutes, until it reaches the first compaction period (e.g. 10 hours).
-  - In order to retain the key-value history of the last compaction period, it uses the last revision that was fetched before the compaction period, from the revision records that were collected every 5 minutes.
-  - When `--auto-compaction-retention=10`, the compactor uses revision 100 as the compact revision, where revision 100 is the latest revision fetched 10 hours ago.
-  - If compaction succeeds or the requested revision has already been compacted, it resets the period timer and starts over with new historical revision records (i.e. restarts revision collection and compaction for the next 10-hour period).
- - If compaction fails, it retries in 5 minutes.
-
-### client v3
-
-- Add `SetEndpoints` method; update endpoints at runtime.
-- Add `Sync` method; auto-update endpoints at runtime.
-- Add `Lease TimeToLive` API; fetch lease information.
-- Replace `Config.Logger` field with global logger.
-- Get API responses are sorted in ascending order by default.
-
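A minimal sketch (not part of this diff) of the v3.1 client additions listed above; the endpoints and TTL are illustrative assumptions, and the import path is the one used by current etcd client releases:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"10.0.0.1:2379"}, // assumed initial endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// SetEndpoints swaps the endpoint list at runtime.
	cli.SetEndpoints("10.0.0.2:2379", "10.0.0.3:2379")

	// Sync refreshes the endpoint list from the cluster membership.
	if err := cli.Sync(context.TODO()); err != nil {
		log.Fatal(err)
	}

	// Lease TimeToLive fetches the remaining TTL and metadata for a lease.
	lease, err := cli.Grant(context.TODO(), 60)
	if err != nil {
		log.Fatal(err)
	}
	ttl, err := cli.TimeToLive(context.TODO(), lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lease %x: ttl=%ds granted=%ds\n", ttl.ID, ttl.TTL, ttl.GrantedTTL)
}
```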
-### etcdctl v3
-
-- Add `lease timetolive` command.
-- Add `--print-value-only` flag to get command.
-- Add `--dest-prefix` flag to make-mirror command.
-- `get` command responses are sorted in ascending order by default.
-
-### gRPC Proxy
-
-- Experimental gRPC proxy feature.
-
-### Other
-
-- `recipes` now conform to sessions defined in `clientv3/concurrency`.
-- ACI has symlinks to `/usr/local/bin/etcd*`.
-
-### Go
-
-- Compile with [*Go 1.7.4*](https://golang.org/doc/devel/release.html#go1.7).
-
-
-
-
diff --git a/CHANGELOG/CHANGELOG-3.2.md b/CHANGELOG/CHANGELOG-3.2.md
deleted file mode 100644
index 095ff6e9f2a..00000000000
--- a/CHANGELOG/CHANGELOG-3.2.md
+++ /dev/null
@@ -1,1021 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.1](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.1.md).
-
-
-## v3.2.33 (TBD)
-
-
-
-## [v3.2.32](https://github.com/etcd-io/etcd/releases/tag/v3.2.32) (2021-03-28)
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.31...v3.2.32) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-### Package `wal`
-- Add WAL slice bound check to make sure the entry index is not greater than the number of entries.
-- Check slice size in `decodeRecord`.
-- Fix panic when the decoder is not set.
-
-### Package `fileutil`
-- Fix constant for Linux locking.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.2.31](https://github.com/etcd-io/etcd/releases/tag/v3.2.31) (2020-08-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.30...v3.2.31) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-### auth, etcdserver
-
-- Improve [`runtime.FDUsage` call pattern to reduce object allocations, memory usage, and CPU usage](https://github.com/etcd-io/etcd/pull/11986).
-- [Attach a fake root token when calling `LeaseRevoke`](https://github.com/etcd-io/etcd/pull/11691).
-  - Fixes a data corruption bug caused by lease expiration when authentication is enabled and the cluster is upgraded from etcd 3.2 to etcd 3.3.
-
-### Package `runtime`
-
-- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214).
-
-### Metrics, Monitoring
-
-- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.2.30](https://github.com/etcd-io/etcd/releases/tag/v3.2.30) (2020-04-01)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.29...v3.2.30) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-### Package `wal`
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-
-### Metrics, Monitoring
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.2.29](https://github.com/etcd-io/etcd/releases/tag/v3.2.29) (2020-03-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.28...v3.2.29) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-### etcd server
-
-- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613).
-- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704).
-
-### client v3
-
-- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687).
- - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys.
-
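For context on the `clientv3.WithRequireLeader` fix above, here is a minimal sketch (not part of this diff) of attaching the "hasleader" metadata to a watch; the endpoint and key prefix are illustrative assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithRequireLeader adds the "hasleader" metadata so the server fails the
	// stream when it has no leader instead of waiting silently; the fix above
	// keeps any metadata already present on the context intact.
	ctx := clientv3.WithRequireLeader(context.Background())
	for wresp := range cli.Watch(ctx, "config/", clientv3.WithPrefix()) {
		if err := wresp.Err(); err != nil {
			log.Fatalf("watch error: %v", err)
		}
		for _, ev := range wresp.Events {
			log.Printf("%s %q=%q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```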
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.28](https://github.com/etcd-io/etcd/releases/tag/v3.2.28) (2019-11-10)
-
-### Improved
-
-- Add `etcd --experimental-peer-skip-client-san-verification` to [skip verification of peer client address](https://github.com/etcd-io/etcd/pull/11195).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11271) Prometheus metric.
-
-### etcdserver
-
-- Fix [`wait purge file loop during shutdown`](https://github.com/etcd-io/etcd/pull/11308).
- - Previously, during shutdown etcd could accidentally remove needed wal files, resulting in catastrophic error `etcdserver: open wal error: wal: file not found.` during startup.
- - Now, etcd makes sure the purge file loop exits before server signals stop of the raft node.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.27](https://github.com/etcd-io/etcd/releases/tag/v3.2.27) (2019-09-17)
-
-### etcdctl v3
-
-- [Strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443) with etcdctl v2
-- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540).
- - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532).
-  - The command output has changed. Previously, if an endpoint was unreachable, the output was
-    "\<endpoint\> is unhealthy: failed to connect: \<error\>". This change unifies the error message; all error types
-    now produce "\<endpoint\> is unhealthy: failed to commit proposal: \<error\>".
-- Fix [`etcdctl snapshot status` to not modify snapshot file](https://github.com/etcd-io/etcd/pull/11157).
- - For example, start etcd `v3.3.10`
- - Write some data
- - Use etcdctl `v3.3.10` to save snapshot
- - Somehow, upgrading Kubernetes fails, thus rolling back to previous version etcd `v3.2.24`
- - Run etcdctl `v3.2.24` `snapshot status` against the snapshot file saved from `v3.3.10` server
- - Run etcdctl `v3.2.24` `snapshot restore` fails with `"expected sha256 [12..."`
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646).
-- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.26](https://github.com/etcd-io/etcd/releases/tag/v3.2.26) (2019-01-11)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.25...v3.2.26) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### gRPC Proxy
-
-- Fix [memory leak in cache layer](https://github.com/etcd-io/etcd/pull/10327).
-
-### Security, Authentication
-
-- Disable [CommonName authentication for gRPC-gateway](https://github.com/etcd-io/etcd/pull/10366). gRPC-gateway proxy requests to the etcd server use the etcd client server TLS certificate; if that certificate contains a CommonName, it should not be used for authentication, as that could lead to permission escalation.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.25](https://github.com/etcd-io/etcd/releases/tag/v3.2.25) (2018-10-10)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.24...v3.2.25) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Improved
-
-- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates that a message send to a peer failed.
-- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that the local node might have a slow network.
-- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information.
-- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats.
- - Previously, it only samples the TCP connection for snapshot messages.
-- Display all registered [gRPC metrics at start](https://github.com/etcd-io/etcd/pull/10032).
-- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric.
-- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric.
-
-### client v3
-
-- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package.
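-
-For context, a typical `clientv3/concurrency` locking flow looks like the sketch below (the endpoint, lock prefix, timeout, and import paths are assumptions); the fix ensures the lock key is released again when the acquiring context is cancelled mid-`Lock`:
-
-```go
-package main
-
-import (
-	"context"
-	"log"
-	"time"
-
-	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path; differs on older releases
-	"go.etcd.io/etcd/client/v3/concurrency"
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	sess, err := concurrency.NewSession(cli)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer sess.Close()
-
-	mu := concurrency.NewMutex(sess, "/my-lock/")
-
-	// If this context is cancelled while Lock is still contending, the fix makes
-	// sure any lock key created on the server is released again.
-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-	defer cancel()
-
-	if err := mu.Lock(ctx); err != nil {
-		log.Println("lock not acquired:", err)
-		return
-	}
-	defer mu.Unlock(context.Background())
-	// ... critical section ...
-}
-```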
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.24](https://github.com/etcd-io/etcd/releases/tag/v3.2.24) (2018-07-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.23...v3.2.24) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Improved
-
-- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric.
-- Add [`etcd_server_heartbeat_send_failures_total`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric.
-- Add [`etcd_server_slow_apply_total`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric.
-- Add [`etcd_disk_backend_defrag_duration_seconds`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric.
-- Add [`etcd_mvcc_hash_duration_seconds`](https://github.com/etcd-io/etcd/pull/9942) Prometheus metric.
-- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric.
-- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
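-  - For the sample values above, `20480 - 16384 = 4096` bytes (4 KB) could be reclaimed on disk by running a defragment operation.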
-- Add [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) Prometheus metric.
- - In addition to [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819).
-- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_server_quota_backend_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-
-### gRPC Proxy
-
-- Add [flags for specifying TLS for connecting to proxy](https://github.com/etcd-io/etcd/pull/9894):
- - Add `grpc-proxy start --cert-file`, `grpc-proxy start --key-file` and `grpc-proxy start --trusted-ca-file` flags.
-- Add [`grpc-proxy start --metrics-addr` flag for specifying a separate metrics listen address](https://github.com/etcd-io/etcd/pull/9894).
-
-### client v3
-
-- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952).
-  - If the `<-chan *clientv3.LeaseKeepAliveResponse` returned from `clientv3.Lease.KeepAlive` was never consumed or the channel was full, the client was [sending a keepalive request every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of the expected rate of once per "TTL / 3" duration.
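-
-For reference, the keepalive channel returned by `clientv3.Lease.KeepAlive` is expected to be drained by the caller; a minimal sketch (the endpoint, TTL value, and import path are assumptions):
-
-```go
-package main
-
-import (
-	"context"
-	"log"
-
-	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path; differs on older releases
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	grant, err := cli.Grant(context.Background(), 30) // 30-second TTL, placeholder value
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	ch, err := cli.KeepAlive(context.Background(), grant.ID)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Draining the channel lets the client send keepalives at the expected
-	// ~TTL/3 rate; leaving it full is what triggered the 500ms resend loop.
-	for resp := range ch {
-		log.Printf("lease %x refreshed, remaining TTL=%d", resp.ID, resp.TTL)
-	}
-}
-```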
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.23](https://github.com/etcd-io/etcd/releases/tag/v3.2.23) (2018-06-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.22...v3.2.23) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Improved
-
-- Improve [slow request apply warning log](https://github.com/etcd-io/etcd/pull/9288).
- - e.g. `read-only range request "key:\"/a\" range_end:\"/b\" " with result "range_response_count:3 size:96" took too long (97.966µs) to execute`.
- - Redact [request value field](https://github.com/etcd-io/etcd/pull/9822).
- - Provide [response size](https://github.com/etcd-io/etcd/pull/9826).
-- Add [backoff on watch retries on transient errors](https://github.com/etcd-io/etcd/pull/9840).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric.
- - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.22](https://github.com/etcd-io/etcd/releases/tag/v3.2.22) (2018-06-06)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.21...v3.2.22) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Security, Authentication
-
-- Support TLS cipher suite whitelisting.
- - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320).
- - TLS handshake fails when client hello is requested with invalid cipher suites.
- - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag.
- - If empty, Go auto-populates the list.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.21](https://github.com/etcd-io/etcd/releases/tag/v3.2.21) (2018-05-31)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.20...v3.2.21) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Fix [auth storage panic when simple token provider is disabled](https://github.com/etcd-io/etcd/pull/8695).
-- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775).
- - Let's assume that a watcher had been requested with a future revision X and sent to node A that became network-partitioned thereafter. Meanwhile, cluster makes progress. Then when the partition gets removed, the leader sends a snapshot to node A. Previously if the snapshot's latest revision is still lower than the watch revision X, **etcd server panicked** during snapshot restore operation.
- - Now, this server-side panic has been fixed.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.20](https://github.com/etcd-io/etcd/releases/tag/v3.2.20) (2018-05-09)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.19...v3.2.20) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Purge old [`*.snap.db` snapshot files](https://github.com/etcd-io/etcd/pull/7967).
- - Previously, etcd did not respect `--max-snapshots` flag to purge old `*.snap.db` files.
- - Now, etcd purges old `*.snap.db` files to keep maximum `--max-snapshots` number of files on disk.
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.19](https://github.com/etcd-io/etcd/releases/tag/v3.2.19) (2018-04-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.18...v3.2.19) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Fix [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/9557) Prometheus metric.
-- Fix [race conditions in v2 server stat collecting](https://github.com/etcd-io/etcd/pull/9562).
-- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric.
-
-### Security, Authentication
-
-- Fix [TLS reload](https://github.com/etcd-io/etcd/pull/9570) when [certificate SAN field only includes IP addresses but no domain names](https://github.com/etcd-io/etcd/issues/9541).
-  - In Go, the server calls `(*tls.Config).GetCertificate` for TLS reload if and only if the server's `(*tls.Config).Certificates` field is empty, or `(*tls.ClientHelloInfo).ServerName` is not empty with a valid SNI from the client. Previously, etcd always populated `(*tls.Config).Certificates` on the initial client TLS handshake, as non-empty. Thus, the client was always expected to supply a matching SNI in order to pass the TLS verification and to trigger `(*tls.Config).GetCertificate` to reload TLS assets.
-  - However, a certificate whose SAN field does [not include any domain names but only IP addresses](https://github.com/etcd-io/etcd/issues/9541) would produce a `*tls.ClientHelloInfo` with an empty `ServerName` field, thus failing to trigger the TLS reload on the initial TLS handshake; this becomes a problem when expired certificates need to be replaced online.
-  - Now, `(*tls.Config).Certificates` is created empty on the initial TLS client handshake, first to trigger `(*tls.Config).GetCertificate`, and then to populate the rest of the certificates on every new TLS connection, even when the client SNI is empty (e.g. cert only includes IPs).
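-
-For background, a minimal sketch of the Go reload pattern the fix relies on, assuming placeholder certificate/key paths and listen address (illustrative only, not etcd's actual code):
-
-```go
-package main
-
-import (
-	"crypto/tls"
-	"log"
-	"net/http"
-)
-
-func main() {
-	cfg := &tls.Config{
-		// Leaving Certificates empty makes Go fall through to GetCertificate on
-		// every handshake, even when the client sends no SNI (e.g. IP-only SANs).
-		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
-			// Placeholder paths: reloading from disk here picks up replaced
-			// certificates without restarting the server.
-			cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
-			if err != nil {
-				return nil, err
-			}
-			return &cert, nil
-		},
-	}
-
-	srv := &http.Server{Addr: ":8443", TLSConfig: cfg}
-	log.Fatal(srv.ListenAndServeTLS("", ""))
-}
-```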
-
-### etcd server
-
-- Add [`etcd --initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward.
-  - By default `etcd --initial-election-tick-advance=true`, so the local member fast-forwards election ticks to speed up the "initial" leader election trigger.
-  - This benefits the case of larger election ticks. For instance, a cross-datacenter deployment may require a longer election timeout of 10 seconds. If enabled, the local node does not need to wait up to 10 seconds; instead, it fast-forwards its election ticks to 8 seconds, leaving only 2 seconds before leader election.
-  - The major assumptions are that either the cluster has no active leader, so advancing ticks enables faster leader election, or the cluster already has an established leader and the rejoining follower is likely to receive heartbeats from the leader after the tick advance and before the election timeout.
-  - However, when the network from the leader to the rejoining follower is congested and the follower does not receive a leader heartbeat within the remaining election ticks, a disruptive election has to happen, affecting cluster availability.
-  - Now, this can be disabled by setting `--initial-election-tick-advance=false`.
-  - Disabling this slows down the initial bootstrap process for cross-datacenter deployments; configure `--initial-election-tick-advance` with that trade-off in mind.
-  - A single-node cluster advances ticks regardless.
- - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.18](https://github.com/etcd-io/etcd/releases/tag/v3.2.18) (2018-03-29)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.17...v3.2.18) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Improved
-
-- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333).
-  - Previously, etcd fast-forwarded election ticks on server start, with only one tick left before leader election. This speeds up the start phase, without having to wait until all election ticks elapse, and is useful for cross-datacenter deployments with larger election timeouts. However, it hurt cluster availability if the last tick elapsed before the leader contacted the restarted node.
-  - Now, when etcd restarts, it adjusts election ticks to leave more than one tick remaining, giving the leader more time to prevent a disruptive restart.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add missing [`etcd_network_peer_sent_failures_total` count](https://github.com/etcd-io/etcd/pull/9437).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.17](https://github.com/etcd-io/etcd/releases/tag/v3.2.17) (2018-03-08)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.16...v3.2.17) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Fix [server panic on invalid Election Proclaim/Resign HTTP(S) requests](https://github.com/etcd-io/etcd/pull/9379).
- - Previously, wrong-formatted HTTP requests to Election API could trigger panic in etcd server.
- - e.g. `curl -L http://localhost:2379/v3/election/proclaim -X POST -d '{"value":""}'`, `curl -L http://localhost:2379/v3/election/resign -X POST -d '{"value":""}'`.
-- Prevent [overflow by large `TTL` values for `Lease` `Grant`](https://github.com/etcd-io/etcd/pull/9399).
-  - The `TTL` parameter to a `Grant` request is in seconds.
-  - Leases with too large `TTL` values exceeding `math.MaxInt64` [expire in unexpected ways](https://github.com/etcd-io/etcd/issues/9374).
-  - The server now returns `rpctypes.ErrLeaseTTLTooLarge` to the client when the requested `TTL` is larger than *9,000,000,000 seconds* (more than 285 years).
-  - Again, an etcd `Lease` is meant for short, periodic keepalives or sessions in the range of seconds or minutes, not hours or days (see the sketch after this list).
-- Enable etcd server [`raft.Config.CheckQuorum` when starting with `ForceNewCluster`](https://github.com/etcd-io/etcd/pull/9347).
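-
-As referenced above, a minimal sketch of the new `TTL` cap from the client's point of view (the endpoint, TTL values, and import path are assumptions):
-
-```go
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-
-	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path; differs on older releases
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	// A sane, short-lived lease: TTL is expressed in seconds.
-	if _, err := cli.Grant(context.Background(), 60); err != nil {
-		log.Fatal(err)
-	}
-
-	// A TTL above the 9,000,000,000-second cap is now rejected by the server
-	// (ErrLeaseTTLTooLarge) instead of overflowing into unexpected expiry.
-	if _, err := cli.Grant(context.Background(), 10000000000); err != nil {
-		fmt.Println("rejected as expected:", err)
-	}
-}
-```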
-
-### Proxy v2
-
-- Fix [v2 proxy leaky HTTP requests](https://github.com/etcd-io/etcd/pull/9336).
-
-### Go
-
-- Compile with [*Go 1.8.7*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.16](https://github.com/etcd-io/etcd/releases/tag/v3.2.16) (2018-02-12)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.15...v3.2.16) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9297).
-  - An "unsynced" watcher is a watcher that still needs to catch up with events that have already happened.
-  - That is, an "unsynced" watcher is a slow watcher that was requested on an old revision.
-  - The "unsynced" watcher restore operation was not correctly populating its underlying watcher group, which could cause [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086).
-  - For example, a node gets network-partitioned with a watcher on a future revision, falls behind, and receives a leader snapshot after the partition is removed. When applying this snapshot, etcd's watch storage moves the currently synced watchers to the unsynced group, since synced watchers may have become stale during the partition, and resets the synced watcher group to restart the watcher routines. Previously, there was a bug in moving watchers from the synced group to the unsynced group, so a client could miss events when its watcher had been requested against the network-partitioned node.
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.15](https://github.com/etcd-io/etcd/releases/tag/v3.2.15) (2018-01-22)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.14...v3.2.15) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Prevent [server panic from member update/add](https://github.com/etcd-io/etcd/pull/9174) with [wrong scheme URLs](https://github.com/etcd-io/etcd/issues/9173).
-- Log [user context cancel errors on stream APIs in debug level with TLS](https://github.com/etcd-io/etcd/pull/9178).
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.14](https://github.com/etcd-io/etcd/releases/tag/v3.2.14) (2018-01-11)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.13...v3.2.14) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Improved
-
-- Log [user context cancel errors on stream APIs in debug level](https://github.com/etcd-io/etcd/pull/9105).
-
-### etcd server
-
-- Fix [`mvcc/backend.defragdb` nil-pointer dereference on create bucket failure](https://github.com/etcd-io/etcd/pull/9119).
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.13](https://github.com/etcd-io/etcd/releases/tag/v3.2.13) (2018-01-02)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.12...v3.2.13) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Remove [verbose error messages on stream cancel and gRPC info-level logs](https://github.com/etcd-io/etcd/pull/9080) in server-side.
-- Fix [gRPC server panic on `GracefulStop` TLS-enabled server](https://github.com/etcd-io/etcd/pull/8987).
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.12](https://github.com/etcd-io/etcd/releases/tag/v3.2.12) (2017-12-20)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.11...v3.2.12) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Dependency
-
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases/tag) from [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4) to [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5).
-- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.3`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3) to [**`v1.3.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.0).
-
-### etcd server
-
-- Fix [error message of `Revision` compactor](https://github.com/etcd-io/etcd/pull/8999) in server-side.
-
-### client v3
-
-- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/etcd-io/etcd/clientv3#Config).
- - Fix [exceeded response size limit error in client-side](https://github.com/etcd-io/etcd/issues/9043).
- - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099).
-  - In previous versions (v3.2.10, v3.2.11), the client response size was limited to only 4 MiB.
- - `MaxCallSendMsgSize` default value is 2 MiB, if not configured.
- - `MaxCallRecvMsgSize` default value is `math.MaxInt32`, if not configured.
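-
-A minimal sketch of setting the new limits (the endpoint, sizes, and import path are assumptions, not recommended values):
-
-```go
-package main
-
-import (
-	"log"
-
-	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path; differs on older releases
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{
-		Endpoints: []string{"127.0.0.1:2379"},
-		// Raise the per-RPC message limits; when left at zero, the client uses
-		// the defaults described above (2 MiB send, math.MaxInt32 receive).
-		MaxCallSendMsgSize: 8 * 1024 * 1024,
-		MaxCallRecvMsgSize: 64 * 1024 * 1024,
-	})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-}
-```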
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.11](https://github.com/etcd-io/etcd/releases/tag/v3.2.11) (2017-12-05)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.10...v3.2.11) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Dependency
-
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases/tag) from [**`v1.7.3`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.3) to [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4).
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- Log [more details on TLS handshake failures](https://github.com/etcd-io/etcd/pull/8952/files).
-
-### client v3
-
-- Fix a racy grpc-go server handler transport `WriteStatus` call to prevent [TLS-enabled etcd server crash](https://github.com/etcd-io/etcd/issues/8904).
-- Add [gRPC RPC failure warnings](https://github.com/etcd-io/etcd/pull/8939) to help debug such issues in the future.
-
-### Documentation
-
-- Remove `--listen-metrics-urls` flag in monitoring document (not released in `v3.2.x`, planned for `v3.3.x`).
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.10](https://github.com/etcd-io/etcd/releases/tag/v3.2.10) (2017-11-16)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.9...v3.2.10) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Dependency
-
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases/tag) from [**`v1.2.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.2.1) to [**`v1.7.3`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.3).
-- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.2.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.2.0) to [**`v1.3`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3).
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- Revert [discovery SRV auth `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/etcd-io/etcd/pull/8651) to support non-wildcard subject alternative names in the certs (see [issue #8445](https://github.com/etcd-io/etcd/issues/8445) for more contexts).
- - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `etcd.local` (**not `*.etcd.local`**) as an entry in Subject Alternative Name (SAN) field.
-
-### etcd server
-
-- Replace backend key-value database `boltdb/bolt` with [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to address [backend database size issue](https://github.com/etcd-io/etcd/issues/8009).
-
-### client v3
-
-- Rewrite balancer to handle [network partitions](https://github.com/etcd-io/etcd/issues/8711).
-
-### Go
-
-- Compile with [*Go 1.8.5*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.9](https://github.com/etcd-io/etcd/releases/tag/v3.2.9) (2017-10-06)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.8...v3.2.9) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- Update `golang.org/x/crypto/bcrypt` (see [golang/crypto@6c586e1](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117)).
-- Fix discovery SRV bootstrapping to [authenticate `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/etcd-io/etcd/pull/8651), in order to support sub-domain wildcard matching (see [issue #8445](https://github.com/etcd-io/etcd/issues/8445) for more contexts).
- - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `*.etcd.local` as an entry in Subject Alternative Name (SAN) field.
-
-### Go
-
-- Compile with [*Go 1.8.4*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.8](https://github.com/etcd-io/etcd/releases/tag/v3.2.8) (2017-09-29)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.7...v3.2.8) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### client v2
-
-- Fix v2 client failover to next endpoint on mutable operation.
-
-### gRPC Proxy
-
-- Handle [`KeysOnly` flag](https://github.com/etcd-io/etcd/pull/8552).
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.7](https://github.com/etcd-io/etcd/releases/tag/v3.2.7) (2017-09-01)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.6...v3.2.7) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Security, Authentication
-
-- Fix [server-side auth so concurrent auth operations do not return old revision error](https://github.com/etcd-io/etcd/pull/8306).
-
-### client v3
-
-- Fix [`concurrency/stm` Put with serializable snapshot](https://github.com/etcd-io/etcd/pull/8439).
- - Use store revision from first fetch to resolve write conflicts instead of modified revision.
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.6](https://github.com/etcd-io/etcd/releases/tag/v3.2.6) (2017-08-21)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.5...v3.2.6) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Fix watch restore from snapshot.
-- Fix multiple URLs for `--listen-peer-urls` flag.
-- Add `--enable-pprof` flag to etcd configuration file format.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Fix `etcd_debugging_mvcc_keys_total` inconsistency.
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.5](https://github.com/etcd-io/etcd/releases/tag/v3.2.5) (2017-08-04)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.4...v3.2.5) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcdctl v3
-
-- Return non-zero exit code on unhealthy `endpoint health`.
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- [Server supports reverse-lookup on wildcard DNS `SAN`](https://github.com/etcd-io/etcd/pull/8281). For instance, if the peer cert contains only DNS names (no IP addresses) in the Subject Alternative Name (SAN) field, the server first reverse-lookups the remote IP address to get a list of names mapping to that address (e.g. `nslookup IPADDR`), then accepts the connection if any of those names matches the peer cert's DNS names (either exactly or by wildcard). If none matches, the server forward-lookups each DNS entry in the peer cert (e.g. looks up `example.default.svc` when the entry is `*.example.default.svc`), and accepts the connection only when the host's resolved addresses include the peer's remote IP address. For example, peer B's CSR (with `cfssl`) has the SAN field `["*.example.default.svc", "*.example.default.svc.cluster.local"]` while peer B's remote IP address is `10.138.0.2`. When peer B tries to join the cluster, peer A reverse-lookups the IP `10.138.0.2` to get the list of host names and matches them, exactly or by wildcard, against peer B's cert DNS names in the SAN field. If neither the reverse nor the forward lookups produce a match, it returns the error `"tls: "10.138.0.2" does not match any of DNSNames ["*.example.default.svc","*.example.default.svc.cluster.local"]`. See [issue#8268](https://github.com/etcd-io/etcd/issues/8268) for more detail. An illustrative sketch of this matching flow follows below.
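-
-An illustrative sketch of this matching flow using only the Go standard library (the peer IP, SAN entries, and wildcard helper are assumptions; this is not etcd's actual implementation):
-
-```go
-package main
-
-import (
-	"fmt"
-	"net"
-	"strings"
-)
-
-// matchesWildcard reports whether host matches a SAN entry such as
-// "*.example.default.svc" (exact match when the entry has no wildcard).
-func matchesWildcard(san, host string) bool {
-	if !strings.HasPrefix(san, "*.") {
-		return san == host
-	}
-	dot := strings.Index(host, ".")
-	return dot > 0 && host[dot+1:] == san[2:]
-}
-
-func main() {
-	peerIP := "10.138.0.2" // placeholder remote peer address
-	sans := []string{"*.example.default.svc", "*.example.default.svc.cluster.local"}
-
-	names, err := net.LookupAddr(peerIP) // reverse lookup, like `nslookup IPADDR`
-	if err != nil {
-		fmt.Println("reverse lookup failed:", err)
-		return
-	}
-	for _, name := range names {
-		host := strings.TrimSuffix(name, ".")
-		for _, san := range sans {
-			if matchesWildcard(san, host) {
-				fmt.Printf("accepted: %s matches %s\n", host, san)
-				return
-			}
-		}
-	}
-	fmt.Println("no SAN matched; a forward lookup of each SAN entry would be tried next")
-}
-```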
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Fix unreachable `/metrics` endpoint when `--enable-v2=false`.
-
-### gRPC Proxy
-
-- Handle [`PrevKv` flag](https://github.com/etcd-io/etcd/pull/8366).
-
-### Other
-
-- Add container registry `gcr.io/etcd-development/etcd`.
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.4](https://github.com/etcd-io/etcd/releases/tag/v3.2.4) (2017-07-19)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.3...v3.2.4) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Do not block on active client stream when stopping server
-
-### gRPC proxy
-
-- Fix gRPC proxy Snapshot RPC error handling
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.3](https://github.com/etcd-io/etcd/releases/tag/v3.2.3) (2017-07-14)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.2...v3.2.3) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### client v3
-
-- Let clients establish unlimited streams
-
-### Other
-
-- Tag docker images with minor versions
- - e.g. `docker pull quay.io/coreos/etcd:v3.2` to fetch latest v3.2 versions
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.2](https://github.com/etcd-io/etcd/releases/tag/v3.2.2) (2017-07-07)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.1...v3.2.2) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Improved
-
-- Rate-limit lease revoke on expiration.
-- Extend leases on promote to avoid queueing effect on lease expiration.
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- [Server accepts connections if IP matches, without checking DNS entries](https://github.com/etcd-io/etcd/pull/8223). For instance, if the peer cert contains IP addresses and DNS names in the Subject Alternative Name (SAN) field, and the remote IP address matches one of those IP addresses, the server accepts the connection without further checking the DNS names. For example, peer B's CSR (with `cfssl`) has the SAN field `["invalid.domain", "10.138.0.2"]`, peer B's remote IP address is `10.138.0.2`, and `invalid.domain` is an invalid host. When peer B tries to join the cluster, peer A successfully authenticates B, since the Subject Alternative Name (SAN) field has a valid matching IP address. See [issue#8206](https://github.com/etcd-io/etcd/issues/8206) for more detail.
-
-### etcd server
-
-- Accept connection with matched IP SAN but no DNS match.
- - Don't check DNS entries in certs if there's a matching IP.
-
-### gRPC gateway
-
-- Use user-provided listen address to connect to gRPC gateway.
-  - `net.Listener` rewrites IPv4 `0.0.0.0` to IPv6 `[::]`, breaking IPv6-disabled hosts.
- - Only v3.2.0, v3.2.1 are affected.
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.1](https://github.com/etcd-io/etcd/releases/tag/v3.2.1) (2017-06-23)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.0...v3.2.1) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### etcd server
-
-- Fix backend database in-memory index corruption issue on restore (only 3.2.0 is affected).
-
-### gRPC gateway
-
-- Fix Txn marshaling.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Fix backend database size debugging metrics.
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
-
-## [v3.2.0](https://github.com/etcd-io/etcd/releases/tag/v3.2.0) (2017-06-09)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.1.0...v3.2.0) and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.2 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_2/).**
-
-### Improved
-
-- Improve backend read concurrency.
-
-### Breaking Changes
-
-- Increased [`--snapshot-count` default value from 10,000 to 100,000](https://github.com/etcd-io/etcd/pull/7160).
- - Higher snapshot count means it holds Raft entries in memory for longer before discarding old entries.
- - It is a trade-off between less frequent snapshotting and [higher memory usage](https://github.com/kubernetes/kubernetes/issues/60589#issuecomment-371977156).
-  - Use a lower `--snapshot-count` value for lower memory usage.
-  - Use a higher `--snapshot-count` value for better availability of slow followers (less frequent snapshots from the leader).
-- `clientv3.Lease.TimeToLive` returns `LeaseTimeToLiveResponse.TTL == -1` when the lease is not found (see the sketch after this list).
-- `clientv3.NewFromConfigFile` is moved to `clientv3/yaml.NewConfig`.
-- `embed.Etcd.Peers` field is now `[]*peerListener`.
-- Rejects domain names for `--listen-peer-urls` and `--listen-client-urls` (3.1 only prints out warnings), since [a domain name is invalid for network interface binding](https://github.com/etcd-io/etcd/issues/6336).
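-
-As referenced above, a minimal sketch of the new `TimeToLive` behavior (the endpoint, lease ID, and import path are assumptions):
-
-```go
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-
-	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path; differs on older releases
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	// Arbitrary, most likely nonexistent lease ID (placeholder).
-	resp, err := cli.TimeToLive(context.Background(), clientv3.LeaseID(12345))
-	if err != nil {
-		log.Fatal(err)
-	}
-	if resp.TTL == -1 {
-		fmt.Println("lease not found: TTL == -1 instead of an error")
-	}
-}
-```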
-
-### Dependency
-
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.0.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.0.4) to [**`v1.2.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.2.1).
-- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) to [**`v1.2.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.2.0).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_disk_backend_snapshot_duration_seconds`](https://github.com/etcd-io/etcd/pull/7892)
-- Add `etcd_debugging_server_lease_expired_total` metrics.
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- [TLS certificates get reloaded on every client connection](https://github.com/etcd-io/etcd/pull/7829). This is useful when replacing expiry certs without stopping etcd servers; it can be done by overwriting old certs with new ones. Refreshing certs for every connection should not have too much overhead, but can be improved in the future, with caching layer. Example tests can be found [here](https://github.com/etcd-io/etcd/blob/b041ce5d514a4b4aaeefbffb008f0c7570a18986/integration/v3_grpc_test.go#L1601-L1757).
-- [Server denies incoming peer certs with wrong IP `SAN`](https://github.com/etcd-io/etcd/pull/7687). For instance, if peer cert contains any IP addresses in Subject Alternative Name (SAN) field, server authenticates a peer only when the remote IP address matches one of those IP addresses. This is to prevent unauthorized endpoints from joining the cluster. For example, peer B's CSR (with `cfssl`) SAN field is `["*.example.default.svc", "*.example.default.svc.cluster.local", "10.138.0.27"]` when peer B's actual IP address is `10.138.0.2`, not `10.138.0.27`. When peer B tries to join the cluster, peer A will reject B with the error `x509: certificate is valid for 10.138.0.27, not 10.138.0.2`, because B's remote IP address does not match the one in Subject Alternative Name (SAN) field.
-- [Server resolves TLS `DNSNames` when checking `SAN`](https://github.com/etcd-io/etcd/pull/7767). For instance, if peer cert contains only DNS names (no IP addresses) in Subject Alternative Name (SAN) field, server authenticates a peer only when forward-lookups (`dig b.com`) on those DNS names have matching IP with the remote IP address. For example, peer B's CSR (with `cfssl`) SAN field is `["b.com"]` when peer B's remote IP address is `10.138.0.2`. When peer B tries to join the cluster, peer A looks up the incoming host `b.com` to get the list of IP addresses (e.g. `dig b.com`). And rejects B if the list does not contain the IP `10.138.0.2`, with the error `tls: 10.138.0.2 does not match any of DNSNames ["b.com"]`.
-- Auth supports JWT tokens.
-
-### etcd server
-
-- RPCs
- - Add Election, Lock service.
-- Native client `etcdserver/api/v3client`
- - client "embedded" in the server.
-- Logging, monitoring
-  - Server warns on large snapshot operations.
-- Add `etcd --enable-v2` flag to enable v2 API server.
- - `etcd --enable-v2=true` by default.
-- Add `etcd --auth-token` flag.
-- v3.2 compactor runs [every hour](https://github.com/etcd-io/etcd/pull/7875).
- - Compactor only supports periodic compaction.
-  - The compactor continues to record the latest revision every 5 minutes.
-  - Every hour, it uses the last revision that was fetched before the compaction period, from the revision records collected every 5 minutes.
-  - That is, every hour the compactor discards historical data created before the compaction period.
-  - The retention window of the compaction period then moves to the next hour.
-  - For instance, when hourly writes are 100 and `--auto-compaction-retention=10`, v3.1 compacts revision 1000, 2000, and 3000 every 10 hours, while v3.2 compacts revision 1000, 1100, and 1200 every hour.
-  - If compaction succeeds or the requested revision has already been compacted, it resets the period timer and removes the used compacted revision from the historical revision records (e.g. it starts the next revision collection and compaction from the previously collected revisions).
- - If compaction fails, it retries in 5 minutes.
-- Allow snapshot over 512MB.
-
-### client v3
-
-- STM prefetching.
-- Add namespace feature.
-- Add `ErrOldCluster` with server version checking.
-- Translate `WithPrefix()` into `WithFromKey()` for empty key.
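-
-For context, a minimal sketch of what the translation means for a range read over an empty key (the endpoint and import path are assumptions):
-
-```go
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-
-	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path; differs on older releases
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	// With a non-empty key this is an ordinary prefix scan; with an empty key,
-	// WithPrefix is now translated into WithFromKey, i.e. a scan over all keys.
-	resp, err := cli.Get(context.Background(), "", clientv3.WithPrefix())
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println("total keys:", resp.Count)
-}
-```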
-
-### etcdctl v3
-
-- Add `check perf` command.
-- Add `etcdctl --from-key` flag to role grant-permission command.
-- `lock` command takes an optional command to execute.
-
-### gRPC Proxy
-
-- Proxy endpoint discovery.
-- Namespaces.
-- Coalesce lease requests.
-
-### etcd gateway
-
-- Support [DNS SRV priority](https://github.com/etcd-io/etcd/pull/7882) for [smart proxy routing](https://github.com/etcd-io/etcd/issues/4378).
-
-### Other
-
-- v3 client
- - concurrency package's elections updated to match RPC interfaces.
- - let client dial endpoints not in the balancer.
-- Release
- - Annotate acbuild with supports-systemd-notify.
- - Add `nsswitch.conf` to Docker container image.
- - Add ppc64le, arm64(experimental) builds.
-
-### Go
-
-- Compile with [*Go 1.8.3*](https://golang.org/doc/devel/release.html#go1.8).
-
-
-
-
diff --git a/CHANGELOG/CHANGELOG-3.3.md b/CHANGELOG/CHANGELOG-3.3.md
deleted file mode 100644
index 8addba112f6..00000000000
--- a/CHANGELOG/CHANGELOG-3.3.md
+++ /dev/null
@@ -1,1121 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.2](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.2.md).
-
-
-
-## v3.3.27 (2021-10-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.26...v3.3.27) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Other
-
-- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs:
- - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption
- - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc
- - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp
- - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out of bound reads.
-
-
-
-## v3.3.26 (2021-10-03)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.25...v3.3.26) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Package `clientv3`
-
-- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready.
-
-### Package `fileutil`
-
-- Fix [constant](https://github.com/etcd-io/etcd/pull/12440) for linux locking.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-## v3.3.25 (2020-08-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.23...v3.3.25) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Security
-
-- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd uses any existing directory that has a permission different from 700 on Linux or 777 on Windows.
-
-
-## [v3.3.24](https://github.com/etcd-io/etcd/releases/tag/v3.3.24) (2020-08-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.23...v3.3.24) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Package `etcd server`
-
-- Fix [`int64` convert panic in raft logger](https://github.com/etcd-io/etcd/pull/12106).
- - Fix [kubernetes/kubernetes#91937](https://github.com/kubernetes/kubernetes/issues/91937).
-
-### Package `runtime`
-
-- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214).
-
-### Metrics, Monitoring
-
-- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-
-
-## [v3.3.23](https://github.com/etcd-io/etcd/releases/tag/v3.3.23) (2020-07-16)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.22...v3.3.23) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Breaking Changes
-
-- Fix [incorrect package dependency when etcd clientv3 is used as a library](https://github.com/etcd-io/etcd/issues/12068).
-- Changed behavior on [existing dir permission](https://github.com/etcd-io/etcd/pull/11798).
-  - Previously, the permission was not checked on an existing data directory or on the directory used for automatically generating self-signed certificates for TLS connections with clients. Now a check is added to make sure those directories, if they already exist, have the desired permission of 700 on Linux and 777 on Windows.
-
-### etcd server
-
-- Fix [watch stream got closed if one watch request is not permitted](https://github.com/etcd-io/etcd/pull/11758).
-- Add [`etcd --auth-token-ttl`](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings.
-- Improve [runtime.FDUsage objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986).
-- Improve [mvcc.watchResponse channel Memory Usage](https://github.com/etcd-io/etcd/pull/11987).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-
-## [v3.3.22](https://github.com/etcd-io/etcd/releases/tag/v3.3.22) (2020-05-20)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.21...v3.3.22) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Package `wal`
-
-- Add [missing CRC checksum check in WAL validate method otherwise causes panic](https://github.com/etcd-io/etcd/pull/11924).
- - See https://github.com/etcd-io/etcd/issues/11918.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.3.21](https://github.com/etcd-io/etcd/releases/tag/v3.3.21) (2020-05-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.20...v3.3.21) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### `etcdctl`
-
-- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
-
-### Package `clientv3`
-
-- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
-
-### etcd server
-
-- Improve logging around snapshot send and receive.
-- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670).
-- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817).
-- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888).
- - Previously, server restore fails if it had crashed after persisting raft hard state but before saving snapshot.
- - See https://github.com/etcd-io/etcd/issues/10219 for more.
-
-### Package `auth`
-
-- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652).
-
-### Metrics, Monitoring
-
-- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.3.20](https://github.com/etcd-io/etcd/releases/tag/v3.3.20) (2020-04-01)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.19...v3.3.20) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Package `wal`
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-
-### Metrics, Monitoring
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.3.19](https://github.com/etcd-io/etcd/releases/tag/v3.3.19) (2020-03-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.18...v3.3.19) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### client v3
-
-- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687).
- - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys.
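-
-A minimal Go sketch of the `clientv3.WithRequireLeader` usage that this fix affects; the endpoint, key, and timeouts are placeholders, and the import path assumes the `go.etcd.io/etcd` module layout.
-
-```go
-// Hedged sketch: wrap a context with WithRequireLeader so requests sent to a
-// member without a leader fail fast instead of blocking.
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-	"time"
-
-	"go.etcd.io/etcd/clientv3"
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{
-		Endpoints:   []string{"localhost:2379"},
-		DialTimeout: 5 * time.Second,
-	})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	// WithRequireLeader attaches the "hasleader" metadata; with this fix it
-	// no longer overwrites metadata already carried by ctx.
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
-	defer cancel()
-	resp, err := cli.Get(clientv3.WithRequireLeader(ctx), "foo")
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println("revision:", resp.Header.Revision)
-}
-```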
-
-### etcd server
-
-- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613).
-- Log [`[CLIENT-PORT]/health` check on the server side](https://github.com/etcd-io/etcd/pull/11704).
-
-### etcdctl v3
-
-- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11638) command to prevent potential timeout.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687).
-
-### gRPC Proxy
-
-- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.3.18](https://github.com/etcd-io/etcd/releases/tag/v3.3.18) (2019-11-26)
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11261) Prometheus metric.
-- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric.
-
-### etcdserver
-
-- Fix [`wait purge file loop during shutdown`](https://github.com/etcd-io/etcd/pull/11308).
- - Previously, during shutdown etcd could accidentally remove needed WAL files, resulting in the catastrophic error `etcdserver: open wal error: wal: file not found.` during startup.
- - Now, etcd makes sure the purge file loop exits before server signals stop of the raft node.
-
-
-
-
-
-## [v3.3.17](https://github.com/etcd-io/etcd/releases/tag/v3.3.17) (2019-10-11)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.16...v3.3.17) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-### Release details
-
-This release replaces 3.3.16.
-
-Due to the etcd 3.3.16 release being incorrectly released (see details below), please use this release instead.
-
-
-
-
-
-## [v3.3.16](https://github.com/etcd-io/etcd/releases/tag/v3.3.16) (2019-10-10)
-
-**WARNING: This is a bad release! Please use etcd 3.3.17 instead. See https://github.com/etcd-io/etcd/issues/11241 for details.**
-
-### Issues with release
-
-- go mod for 'v3.3.16' may return a different hash if retrieved from a Go module proxy than if retrieved directly from GitHub. Depending on this version is unsafe. See https://github.com/etcd-io/etcd/issues/11241 for details.
-- The binaries and docker image for this release have been published and will be left as-is, but will not be signed since this is a bad release.
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.15...v3.3.16) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Add `etcd --experimental-peer-skip-client-san-verification` to [skip verification of peer client address](https://github.com/etcd-io/etcd/pull/11196).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-
-### Dependency
-
-- Upgrade [`github.com/coreos/bbolt`](https://github.com/etcd-io/bbolt/releases) from [**`v1.3.1-coreos.6`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.1-coreos.6) to [**`v1.3.3`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.3).
-
-### etcdctl v3
-
-- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11194) command to prevent potential timeout.
-
-### Go
-
-- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-
-### client v3
-
-- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184).
- - Fix ["kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch" (kubernetes#83028)](https://github.com/kubernetes/kubernetes/issues/83028).
-- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211).
- - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550).
-
-
-
-
-
-## [v3.3.15](https://github.com/etcd-io/etcd/releases/tag/v3.3.15) (2019-08-19)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.14...v3.3.15) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-NOTE: This patch release had to include some new features from 3.4, while trying to minimize the differences in the client balancer implementation. This release fixes ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102).
-
-### Breaking Changes
-
-- Revert "Migrate dependency management tool from `glide` to [Go module](https://github.com/etcd-io/etcd/pull/10063)".
- - Now, etcd >= v3.3.15 uses `glide` for dependency management.
- - See [kubernetes#81434](https://github.com/kubernetes/kubernetes/pull/81434) for more context.
-
-### Go
-
-- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045).
-- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-
-
-
-
-
-## [v3.3.14](https://github.com/etcd-io/etcd/releases/tag/v3.3.14) (2019-08-16)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.13...v3.3.14) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-- [v3.3.14-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.14-rc.0) (2019-08-15), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.14-beta.0...v3.3.14-rc.0).
-- [v3.3.14-beta.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.14-beta.0) (2019-08-14), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.13...v3.3.14-beta.0).
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-NOTE: This patch release had to include some new features from 3.4, while trying to minimize the differences in the client balancer implementation. This release fixes ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102).
-
-### Breaking Changes
-
-- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106).
- - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911).
- - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911).
- - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102).
- - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. To block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`.
-- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045).
-- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-- Migrate dependency management tool from `glide` to [Go module](https://github.com/etcd-io/etcd/pull/10063).
- - <= 3.3 puts `vendor` directory under `cmd/vendor` directory to [prevent conflicting transitive dependencies](https://github.com/etcd-io/etcd/issues/4913).
- - 3.4 moves `cmd/vendor` directory to `vendor` at repository root.
- - Remove recursive symlinks in `cmd` directory.
- - Now `go get/install/build` on `etcd` packages (e.g. `clientv3`, `tools/benchmark`) enforce builds with etcd `vendor` directory.
-- Deprecated `latest` [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tag.
- - **`docker pull gcr.io/etcd-development/etcd:latest` would not be up-to-date**.
-- Deprecated [minor](https://semver.org/) version [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tags.
- - `docker pull gcr.io/etcd-development/etcd:v3.3` would still work but may be stale.
- - **`docker pull gcr.io/etcd-development/etcd:v3.4` would not work**.
- - Use **`docker pull gcr.io/etcd-development/etcd:v3.3.14`** instead, with the exact patch version.
-- Deprecated [ACIs from official release](https://github.com/etcd-io/etcd/pull/9059).
- - [AppC was officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016.
- - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore.
- - `*.aci` files are not available from `v3.4` release.
-
-### etcd server
-
-- Add [`rpctypes.ErrLeaderChanged`](https://github.com/etcd-io/etcd/pull/10094).
- - Now, linearizable requests with read index fail fast when there is a leadership change, instead of waiting until the context times out.
-- Fix [race condition in `rafthttp` transport pause/resume](https://github.com/etcd-io/etcd/pull/10826).
-
-### API
-
-- Add [`watch_id` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9065) to allow user-provided watch ID to `mvcc`.
- - Corresponding `watch_id` is returned via `etcdserverpb.WatchResponse`, if any.
-- Add [`fragment` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9291) to request etcd server to [split watch events](https://github.com/etcd-io/etcd/issues/9294) when the total size of events exceeds `etcd --max-request-bytes` flag value plus gRPC-overhead 512 bytes.
- - The default server-side request bytes limit is `embed.DefaultMaxRequestBytes` which is 1.5 MiB plus gRPC-overhead 512 bytes.
- - If watch response events exceed this server-side request limit and the watch request is created with the `fragment` field set to `true`, the server splits the watch events into a set of chunks, each of which is a subset of the watch events below the server-side request limit.
- - Useful when the client side has limited bandwidth.
- - For example, if a watch response contains 10 events, each 1 MiB, and the server's `etcd --max-request-bytes` flag value is 1 MiB, then the server sends 10 separate fragmented events to the client.
- - For example, if a watch response contains 5 events, each 2 MiB, the server's `etcd --max-request-bytes` flag value is 1 MiB, and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB, then the server tries to send 5 separate fragmented events to the client, and the client errors with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`.
- - Client must implement fragmented watch event merge (which `clientv3` does in etcd v3.4); see the sketch after this list.
-- Add [`WatchRequest.WatchProgressRequest`](https://github.com/etcd-io/etcd/pull/9869).
- - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams.
- - Think of it as `WithProgressNotify` that can be triggered manually.
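-
-The sketch below (referenced from the `fragment` entry above) shows how a raw gRPC client might exercise the new `watch_id`, `fragment`, and progress-request fields. It assumes an etcd member on `localhost:2379` without TLS, the `go.etcd.io/etcd` module layout, and the generated `etcdserverpb` Go type names; it is a sketch, not the documented client API.
-
-```go
-// Hedged sketch against the raw Watch RPC: pick a watch ID, ask for
-// fragmented responses, and manually request a progress notification.
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-
-	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-	"google.golang.org/grpc"
-)
-
-func main() {
-	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer conn.Close()
-
-	stream, err := pb.NewWatchClient(conn).Watch(context.Background())
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// watch_id: caller-chosen ID; fragment: split responses larger than
-	// --max-request-bytes into chunks instead of rejecting them.
-	create := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
-		CreateRequest: &pb.WatchCreateRequest{
-			Key:      []byte("foo"),
-			WatchId:  100,
-			Fragment: true,
-		},
-	}}
-	if err := stream.Send(create); err != nil {
-		log.Fatal(err)
-	}
-
-	// Manually trigger a progress broadcast (an empty response carrying the
-	// latest header) for watchers on this stream.
-	progress := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_ProgressRequest{
-		ProgressRequest: &pb.WatchProgressRequest{},
-	}}
-	if err := stream.Send(progress); err != nil {
-		log.Fatal(err)
-	}
-
-	for i := 0; i < 2; i++ {
-		resp, err := stream.Recv()
-		if err != nil {
-			log.Fatal(err)
-		}
-		fmt.Printf("watch_id=%d created=%v events=%d\n", resp.WatchId, resp.Created, len(resp.Events))
-	}
-}
-```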
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_network_snapshot_send_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric.
-- Add [`etcd_server_snapshot_apply_in_progress_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric.
-
-### client v3
-
-- Fix [gRPC panic "send on closed channel"](https://github.com/etcd-io/etcd/issues/9956) by upgrading [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5) to [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0).
-- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106).
- - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911).
- - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911).
- - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102).
- - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. To block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`, as sketched below.
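-
-A minimal sketch of the `grpc.WithBlock()` usage mentioned above, assuming the `go.etcd.io/etcd` module layout and a placeholder endpoint.
-
-```go
-// Hedged sketch: make clientv3.New block until the underlying connection is
-// up, instead of returning immediately and connecting lazily.
-package main
-
-import (
-	"log"
-	"time"
-
-	"go.etcd.io/etcd/clientv3"
-	"google.golang.org/grpc"
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{
-		Endpoints:   []string{"localhost:2379"},
-		DialTimeout: 5 * time.Second,
-		DialOptions: []grpc.DialOption{grpc.WithBlock()},
-	})
-	if err != nil {
-		log.Fatal(err) // with WithBlock, a dial timeout surfaces here
-	}
-	defer cli.Close()
-	log.Println("connection established")
-}
-```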
-
-### etcdctl v3
-
-- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540).
- - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532).
- - The command output is changed. Previously, if an endpoint is unreachable, the command output is
- "\<endpoint\> is unhealthy: failed to connect: \<error message\>". This change unified the error message: all error types
- now have the same output "\<endpoint\> is unhealthy: failed to commit proposal: \<error message\>".
-- Add [missing newline in `etcdctl endpoint health`](https://github.com/etcd-io/etcd/pull/10793).
-
-### Package `pkg/adt`
-
-- Change [`pkg/adt.IntervalTree` from `struct` to `interface`](https://github.com/etcd-io/etcd/pull/10959).
- - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt).
-- Improve [`pkg/adt.IntervalTree` test coverage](https://github.com/etcd-io/etcd/pull/10959).
- - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt).
-- Fix [Red-Black tree to maintain black-height property](https://github.com/etcd-io/etcd/pull/10978).
- - Previously, the delete operation violated the [black-height property](https://github.com/etcd-io/etcd/issues/10965).
-
-### Go
-
-- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045).
-- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-
-
-
-
-
-## [v3.3.13](https://github.com/etcd-io/etcd/releases/tag/v3.3.13) (2019-05-02)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.12...v3.3.13) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Improve [heartbeat send failure logging](https://github.com/etcd-io/etcd/pull/10663).
-- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646).
-
-### client v3
-
-- Fix [`(*Client).Endpoints()` method race condition](https://github.com/etcd-io/etcd/pull/10595).
-
-### Package `wal`
-
-- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603).
-
-### Dependency
-
-- Migrate [`github.com/ugorji/go/codec`](https://github.com/ugorji/go/releases) to [**`github.com/json-iterator/go`**](https://github.com/json-iterator/go) (See [#10667](https://github.com/etcd-io/etcd/pull/10667) for more).
-- Migrate [`github.com/ghodss/yaml`](https://github.com/ghodss/yaml/releases) to [**`sigs.k8s.io/yaml`**](https://github.com/kubernetes-sigs/yaml) (See [#10718](https://github.com/etcd-io/etcd/pull/10718) for more).
-
-### Go
-
-- Compile with [*Go 1.10.8*](https://golang.org/doc/devel/release.html#go1.10).
-
-
-
-
-
-## [v3.3.12](https://github.com/etcd-io/etcd/releases/tag/v3.3.12) (2019-02-07)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.11...v3.3.12) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### etcdctl v3
-
-- [Strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443) with etcdctl v2.
-
-### Go
-
-- Compile with [*Go 1.10.8*](https://golang.org/doc/devel/release.html#go1.10).
-
-
-
-
-
-## [v3.3.11](https://github.com/etcd-io/etcd/releases/tag/v3.3.11) (2019-01-11)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.10...v3.3.11) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### gRPC Proxy
-
-- Fix [memory leak in cache layer](https://github.com/etcd-io/etcd/pull/10327).
-
-### Security, Authentication
-
-- Disable [CommonName authentication for gRPC-gateway](https://github.com/etcd-io/etcd/pull/10366). gRPC-gateway proxy requests to the etcd server use the etcd client server TLS certificate. If that certificate contains a CommonName, we do not want to use it for authentication as it could lead to permission escalation.
-
-### Go
-
-- Compile with [*Go 1.10.7*](https://golang.org/doc/devel/release.html#go1.10).
-
-
-
-
-
-## [v3.3.10](https://github.com/etcd-io/etcd/releases/tag/v3.3.10) (2018-10-10)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.9...v3.3.10) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates message send to a peer failed.
-- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that local node might have slow network.
-- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information.
-- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats.
- - Previously, it only sampled the TCP connection for snapshot messages.
-- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric.
-- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric.
-
-### client v3
-
-- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package.
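-
-For context, a Go sketch of the `clientv3/concurrency` lock path whose cancellation handling the fix above addresses; the endpoint and lock prefix are placeholders, and the import paths assume the `go.etcd.io/etcd` module layout.
-
-```go
-// Hedged sketch: acquire a distributed lock; the fix above ensures the lock
-// key is released if Lock's context is cancelled during acquisition.
-package main
-
-import (
-	"context"
-	"log"
-	"time"
-
-	"go.etcd.io/etcd/clientv3"
-	"go.etcd.io/etcd/clientv3/concurrency"
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	session, err := concurrency.NewSession(cli)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer session.Close()
-
-	mu := concurrency.NewMutex(session, "/my-lock/")
-	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-	defer cancel()
-
-	// If ctx is cancelled while waiting for the lock, the partially acquired
-	// lock key should be cleaned up rather than left behind.
-	if err := mu.Lock(ctx); err != nil {
-		log.Fatal(err)
-	}
-	log.Println("locked")
-	if err := mu.Unlock(context.Background()); err != nil {
-		log.Fatal(err)
-	}
-}
-```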
-
-### Go
-
-- Compile with [*Go 1.10.4*](https://golang.org/doc/devel/release.html#go1.10).
-
-
-
-
-
-## [v3.3.9](https://github.com/etcd-io/etcd/releases/tag/v3.3.9) (2018-07-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.8...v3.3.9) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897).
-
-### Security, Authentication
-
-- Compile with [*Go 1.10.3*](https://golang.org/doc/devel/release.html#go1.10) to support [crypto/x509 "Name Constraints"](https://github.com/etcd-io/etcd/issues/9912).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric.
-- Add [`etcd_server_heartbeat_send_failures_total`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric.
-- Add [`etcd_server_slow_apply_total`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric.
-- Add [`etcd_disk_backend_defrag_duration_seconds`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric.
-- Add [`etcd_mvcc_hash_duration_seconds`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric.
-- Add [`etcd_mvcc_hash_rev_duration_seconds`](https://github.com/etcd-io/etcd/pull/9940) Prometheus metric.
-- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric.
-- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-- Add [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) Prometheus metric.
- - In addition to [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819).
-- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-
-### client v3
-
-- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952).
- - If the `<-chan *clientv3.LeaseKeepAliveResponse` from `clientv3.Lease.KeepAlive` was never consumed or the channel was full, the client was [sending keepalive requests every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of the expected rate of every "TTL / 3" duration (see the sketch below).
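-
-As a usage reminder, a Go sketch of draining the keepalive response channel so the client can pace requests at roughly TTL/3; the endpoint and TTL are placeholders, and the import path assumes the `go.etcd.io/etcd` module layout.
-
-```go
-// Hedged sketch: grant a lease and keep it alive, consuming the response
-// channel (an unconsumed or full channel is the case this fix addresses).
-package main
-
-import (
-	"context"
-	"log"
-
-	"go.etcd.io/etcd/clientv3"
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	lease, err := cli.Grant(context.Background(), 30) // TTL in seconds
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	ch, err := cli.KeepAlive(context.Background(), lease.ID)
-	if err != nil {
-		log.Fatal(err)
-	}
-	for resp := range ch {
-		log.Printf("lease %x refreshed, TTL=%d", resp.ID, resp.TTL)
-	}
-	log.Println("keepalive channel closed")
-}
-```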
-
-### Go
-
-- Compile with [*Go 1.10.3*](https://golang.org/doc/devel/release.html#go1.10).
-
-
-
-
-
-## [v3.3.8](https://github.com/etcd-io/etcd/releases/tag/v3.3.8) (2018-06-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.7...v3.3.8) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Improve [slow request apply warning log](https://github.com/etcd-io/etcd/pull/9288).
- - e.g. `read-only range request "key:\"/a\" range_end:\"/b\" " with result "range_response_count:3 size:96" took too long (97.966µs) to execute`.
- - Redact [request value field](https://github.com/etcd-io/etcd/pull/9822).
- - Provide [response size](https://github.com/etcd-io/etcd/pull/9826).
-- Add [backoff on watch retries on transient errors](https://github.com/etcd-io/etcd/pull/9840).
-
-### Go
-
-- Compile with [*Go 1.9.7*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.7](https://github.com/etcd-io/etcd/releases/tag/v3.3.7) (2018-06-06)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.6...v3.3.7) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Security, Authentication
-
-- Support TLS cipher suite whitelisting.
- - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320).
- - TLS handshake fails when client hello is requested with invalid cipher suites.
- - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag.
- - If empty, Go auto-populates the list.
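-
-For illustration only (this is generic Go `crypto/tls`, not etcd's listener code), whitelisting cipher suites looks roughly like the sketch below; the suites, address, and certificate paths are placeholders.
-
-```go
-// Hedged illustration: a TLS server config that only accepts a whitelisted
-// set of cipher suites, analogous in spirit to `etcd --cipher-suites`.
-package main
-
-import (
-	"crypto/tls"
-	"log"
-	"net/http"
-)
-
-func main() {
-	tlsCfg := &tls.Config{
-		// Client hellos offering none of these suites fail the handshake.
-		CipherSuites: []uint16{
-			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-		},
-	}
-	srv := &http.Server{Addr: ":8443", TLSConfig: tlsCfg}
-	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
-}
-```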
-
-### etcdctl v3
-
-- Fix [`etcdctl move-leader` command for TLS-enabled endpoints](https://github.com/etcd-io/etcd/pull/9807).
-
-### Go
-
-- Compile with [*Go 1.9.6*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.6](https://github.com/etcd-io/etcd/releases/tag/v3.3.6) (2018-05-31)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.5...v3.3.6) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### etcd server
-
-- Allow [empty auth token](https://github.com/etcd-io/etcd/pull/9369).
- - Previously, when the auth token was an empty string, it returned the [`failed to initialize the etcd server: auth: invalid auth options` error](https://github.com/etcd-io/etcd/issues/9349).
-- Fix [auth storage panic on server lease revoke routine with JWT token](https://github.com/etcd-io/etcd/issues/9695).
-- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775).
- - Let's assume that a watcher had been requested with a future revision X and sent to node A, which became network-partitioned thereafter. Meanwhile, the cluster makes progress. Then, when the partition is removed, the leader sends a snapshot to node A. Previously, if the snapshot's latest revision was still lower than the watch revision X, the **etcd server panicked** during the snapshot restore operation.
- - Now, this server-side panic has been fixed.
-
-### Go
-
-- Compile with [*Go 1.9.6*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.5](https://github.com/etcd-io/etcd/releases/tag/v3.3.5) (2018-05-09)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.4...v3.3.5) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### etcdctl v3
-
-- Fix [`etcdctl watch [key] [range_end] -- [exec-command…]`](https://github.com/etcd-io/etcd/pull/9688) parsing.
- - Previously, `ETCDCTL_API=3 ./bin/etcdctl watch foo -- echo watch event received` panicked.
-
-### Go
-
-- Compile with [*Go 1.9.6*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.4](https://github.com/etcd-io/etcd/releases/tag/v3.3.4) (2018-04-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.3...v3.3.4) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric.
-- Fix [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/9557) Prometheus metric.
-- Fix [race conditions in v2 server stat collecting](https://github.com/etcd-io/etcd/pull/9562).
-
-### Security, Authentication
-
-- Fix [TLS reload](https://github.com/etcd-io/etcd/pull/9570) when [certificate SAN field only includes IP addresses but no domain names](https://github.com/etcd-io/etcd/issues/9541).
- - In Go, server calls `(*tls.Config).GetCertificate` for TLS reload if and only if server's `(*tls.Config).Certificates` field is not empty, or `(*tls.ClientHelloInfo).ServerName` is not empty with a valid SNI from the client. Previously, etcd always populates `(*tls.Config).Certificates` on the initial client TLS handshake, as non-empty. Thus, client was always expected to supply a matching SNI in order to pass the TLS verification and to trigger `(*tls.Config).GetCertificate` to reload TLS assets.
- - However, a certificate whose SAN field does [not include any domain names but only IP addresses](https://github.com/etcd-io/etcd/issues/9541) would request `*tls.ClientHelloInfo` with an empty `ServerName` field, thus failing to trigger the TLS reload on initial TLS handshake; this becomes a problem when expired certificates need to be replaced online.
- - Now, `(*tls.Config).Certificates` is created empty on the initial TLS client handshake, first to trigger `(*tls.Config).GetCertificate`, and then to populate the rest of the certificates on every new TLS connection, even when the client SNI is empty (e.g. the cert only includes IPs); a simplified sketch follows.
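-
-A simplified Go sketch of the reload mechanism described above, using `crypto/tls` directly; the certificate paths are placeholders and this is not etcd's transport code.
-
-```go
-// Hedged sketch: leave tls.Config.Certificates empty and load the key pair
-// inside GetCertificate, so a rotated certificate is picked up on new
-// handshakes even when the client sends no SNI.
-package main
-
-import (
-	"crypto/tls"
-	"log"
-	"net/http"
-)
-
-func main() {
-	tlsCfg := &tls.Config{
-		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
-			// Re-read the key pair on every handshake; a real implementation
-			// would cache it and watch for changes instead.
-			cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
-			if err != nil {
-				return nil, err
-			}
-			return &cert, nil
-		},
-	}
-	srv := &http.Server{Addr: ":8443", TLSConfig: tlsCfg}
-	log.Fatal(srv.ListenAndServeTLS("", "")) // certs come from GetCertificate
-}
-```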
-
-### etcd server
-
-- Add [`etcd --initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward.
- - By default, `etcd --initial-election-tick-advance=true`, so the local member fast-forwards election ticks to speed up the "initial" leader election trigger.
- - This benefits the case of larger election ticks. For instance, a cross-datacenter deployment may require a longer election timeout of 10 seconds. If true, the local node does not need to wait up to 10 seconds. Instead, it fast-forwards its election ticks to 8 seconds, leaving only 2 seconds before the leader election.
- - The major assumptions are that either the cluster has no active leader, so advancing ticks enables faster leader election, or the cluster already has an established leader and the rejoining follower is likely to receive heartbeats from the leader after the tick advance and before the election timeout.
- - However, when the network from the leader to the rejoining follower is congested and the follower does not receive a leader heartbeat within the remaining election ticks, a disruptive election has to happen, thus affecting cluster availability.
- - Now, this can be disabled by setting `--initial-election-tick-advance=false`.
- - Disabling this slows down the initial bootstrap process for cross-datacenter deployments; configure `etcd --initial-election-tick-advance` to make that tradeoff.
- - If single-node, it advances ticks regardless.
- - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333).
-
-### Package `embed`
-
-- Add [`embed.Config.InitialElectionTickAdvance`](https://github.com/etcd-io/etcd/pull/9591) to enable/disable initial election tick fast-forward.
- - `embed.NewConfig()` returns `*embed.Config` with `InitialElectionTickAdvance` set to true by default.
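-
-A minimal `embed` sketch showing where the new field lives; the data directory is a placeholder, error handling is abbreviated, and the import path assumes the `go.etcd.io/etcd` module layout.
-
-```go
-// Hedged sketch: start an embedded etcd with initial election tick
-// fast-forward disabled, per the flag/field described above.
-package main
-
-import (
-	"log"
-
-	"go.etcd.io/etcd/embed"
-)
-
-func main() {
-	cfg := embed.NewConfig()
-	cfg.Dir = "default.etcd"
-	// Defaults to true; set false to trade a slower initial bootstrap for
-	// fewer disruptive elections when a member rejoins.
-	cfg.InitialElectionTickAdvance = false
-
-	e, err := embed.StartEtcd(cfg)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer e.Close()
-	<-e.Server.ReadyNotify()
-	log.Println("embedded etcd is ready")
-}
-```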
-
-### Go
-
-- Compile with [*Go 1.9.5*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.3](https://github.com/etcd-io/etcd/releases/tag/v3.3.3) (2018-03-29)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.2...v3.3.3) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333).
- - Previously, etcd fast-forwards election ticks on server start, with only one tick left for leader election. This is to speed up the start phase, without having to wait until all election ticks elapse. Advancing election ticks is useful for cross-datacenter deployments with larger election timeouts. However, it was affecting cluster availability if the last tick elapsed before the leader contacted the restarted node.
- - Now, when etcd restarts, it adjusts election ticks so that more than one tick is left, giving the leader more time to contact the restarted node and prevent a disruptive restart.
-- Adjust [periodic compaction retention window](https://github.com/etcd-io/etcd/pull/9485).
- - e.g. `etcd --auto-compaction-mode=revision --auto-compaction-retention=1000` automatically `Compact` on `"latest revision" - 1000` every 5-minute (when latest revision is 30000, compact on revision 29000).
- - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=72h` automatically `Compact` with a 72-hour retention window for every 7.2-hour. **Now, `Compact` happens every 1-hour but still with a 72-hour retention window.**
- - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` automatically `Compact` with a 30-minute retention window for every 3-minute. **Now, `Compact` happens every 30-minute but still with a 30-minute retention window.**
- - Periodic compactor keeps recording latest revisions for every compaction period when given period is less than 1-hour, or for every 1-hour when given compaction period is greater than 1-hour (e.g. 1-hour when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`).
- - For every compaction period or 1-hour, compactor uses the last revision that was fetched before compaction period, to discard historical data.
- - The retention window of compaction period moves for every given compaction period or hour.
- - For instance, when hourly writes are 100 and `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`, `v3.2.x`, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 2400, 2640, and 2880 for every 2.4-hour, while `v3.3.3` *or later* compacts revision 2400, 2500, 2600 for every 1-hour.
- - Furthermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revision 30000, 33000, and 36000, for every 3-minute, while `v3.3.3` *or later* compacts revision 30000, 60000, and 90000, for every 30-minute.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add missing [`etcd_network_peer_sent_failures_total` count](https://github.com/etcd-io/etcd/pull/9437).
-
-### Go
-
-- Compile with [*Go 1.9.5*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.2](https://github.com/etcd-io/etcd/releases/tag/v3.3.2) (2018-03-08)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.1...v3.3.2) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### etcd server
-
-- Fix [server panic on invalid Election Proclaim/Resign HTTP(S) requests](https://github.com/etcd-io/etcd/pull/9379).
- - Previously, wrong-formatted HTTP requests to Election API could trigger panic in etcd server.
- - e.g. `curl -L http://localhost:2379/v3/election/proclaim -X POST -d '{"value":""}'`, `curl -L http://localhost:2379/v3/election/resign -X POST -d '{"value":""}'`.
-- Fix [revision-based compaction retention parsing](https://github.com/etcd-io/etcd/pull/9339).
- - Previously, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` was [translated to revision retention 3600000000000](https://github.com/etcd-io/etcd/issues/9337).
- - Now, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` is correctly parsed as revision retention 1.
-- Prevent [overflow by large `TTL` values for `Lease` `Grant`](https://github.com/etcd-io/etcd/pull/9399).
- - The `TTL` parameter to a `Grant` request is in units of seconds.
- - Leases with too large `TTL` values exceeding `math.MaxInt64` [expire in unexpected ways](https://github.com/etcd-io/etcd/issues/9374).
- - The server now returns `rpctypes.ErrLeaseTTLTooLarge` to the client when the requested `TTL` is larger than *9,000,000,000 seconds* (which is >285 years).
- - Again, etcd `Lease` is meant for short periodic keepalives or sessions, in the range of seconds or minutes, not for hours or days (see the sketch after this list).
-- Enable etcd server [`raft.Config.CheckQuorum` when starting with `ForceNewCluster`](https://github.com/etcd-io/etcd/pull/9347).
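-
-As referenced from the lease entry above, a Go sketch of granting a lease and handling the new "TTL too large" rejection; the endpoint and TTL values are placeholders, and the `rpctypes` import path assumes the `go.etcd.io/etcd` module layout.
-
-```go
-// Hedged sketch: oversized lease TTLs are rejected with
-// rpctypes.ErrLeaseTTLTooLarge, while reasonable TTLs succeed.
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-	"time"
-
-	"go.etcd.io/etcd/clientv3"
-	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
-)
-
-func main() {
-	cli, err := clientv3.New(clientv3.Config{
-		Endpoints:   []string{"localhost:2379"},
-		DialTimeout: 5 * time.Second,
-	})
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer cli.Close()
-
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	defer cancel()
-
-	// 10,000,000,000 seconds exceeds the 9,000,000,000-second cap above.
-	if _, err := cli.Grant(ctx, 10000000000); err == rpctypes.ErrLeaseTTLTooLarge {
-		fmt.Println("lease TTL rejected as too large:", err)
-	} else if err != nil {
-		log.Fatal(err)
-	}
-
-	// A TTL in the range of seconds is the intended use.
-	lease, err := cli.Grant(ctx, 30)
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println("granted lease", lease.ID, "with TTL", lease.TTL)
-}
-```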
-
-### Proxy v2
-
-- Fix [v2 proxy leaky HTTP requests](https://github.com/etcd-io/etcd/pull/9336).
-
-### Go
-
-- Compile with [*Go 1.9.4*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.1](https://github.com/etcd-io/etcd/releases/tag/v3.3.1) (2018-02-12)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0...v3.3.1) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Add [warnings on requests taking too long](https://github.com/etcd-io/etcd/pull/9288).
- - e.g. `etcdserver: read-only range request "key:\"\\000\" range_end:\"\\000\" " took too long [3.389041388s] to execute`
-
-### etcd server
-
-- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9281).
- - "unsynced" watcher is watcher that needs to be in sync with events that have happened.
- - That is, "unsynced" watcher is the slow watcher that was requested on old revision.
- - "unsynced" watcher restore operation was not correctly populating its underlying watcher group.
- - Which possibly causes [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086).
- - A node gets network partitioned with a watcher on a future revision, and falls behind receiving a leader snapshot after partition gets removed. When applying this snapshot, etcd watch storage moves current synced watchers to unsynced since sync watchers might have become stale during network partition. And reset synced watcher group to restart watcher routines. Previously, there was a bug when moving from synced watcher group to unsynced, thus client would miss events when the watcher was requested to the network-partitioned node.
-
-### Go
-
-- Compile with [*Go 1.9.4*](https://golang.org/doc/devel/release.html#go1.9).
-
-
-
-
-
-## [v3.3.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.0) (2018-02-01)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.2.0...v3.3.0) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
-
-- [v3.3.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.0) (2018-02-01), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.4...v3.3.0).
-- [v3.3.0-rc.4](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.4) (2018-01-22), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.3...v3.3.0-rc.4).
-- [v3.3.0-rc.3](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.3) (2018-01-17), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.2...v3.3.0-rc.3).
-- [v3.3.0-rc.2](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.2) (2018-01-11), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.1...v3.3.0-rc.2).
-- [v3.3.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.1) (2018-01-02), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0-rc.0...v3.3.0-rc.1).
-- [v3.3.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.3.0-rc.0) (2017-12-20), see [code changes](https://github.com/etcd-io/etcd/compare/v3.2.0...v3.3.0-rc.0).
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/).**
-
-### Improved
-
-- Use [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to replace [`boltdb/bolt`](https://github.com/boltdb/bolt#project-status).
- - Fix [etcd database size grows until `mvcc: database space exceeded`](https://github.com/etcd-io/etcd/issues/8009).
-- [Support database size larger than 8GiB](https://github.com/etcd-io/etcd/pull/7525) (8GiB is now a suggested maximum size for normal environments).
-- [Reduce memory allocation](https://github.com/etcd-io/etcd/pull/8428) on [Range operations](https://github.com/etcd-io/etcd/pull/8475).
-- [Rate limit](https://github.com/etcd-io/etcd/pull/8099) and [randomize](https://github.com/etcd-io/etcd/pull/8101) lease revoke on restart or leader elections.
- - Prevent [spikes in Raft proposal rate](https://github.com/etcd-io/etcd/issues/8096).
-- Support `clientv3` balancer failover under [network faults/partitions](https://github.com/etcd-io/etcd/issues/8711).
-- Better warning on [mismatched `etcd --initial-cluster`](https://github.com/etcd-io/etcd/pull/8083) flag.
- - etcd compares `etcd --initial-advertise-peer-urls` against corresponding `etcd --initial-cluster` URLs with forward-lookup.
- - If resolved IP addresses of `etcd --initial-advertise-peer-urls` and `etcd --initial-cluster` do not match (e.g. [due to DNS error](https://github.com/etcd-io/etcd/pull/9210)), etcd will exit with errors.
- - v3.2 error: `etcd --initial-cluster must include s1=https://s1.test:2380 given --initial-advertise-peer-urls=https://s1.test:2380`.
- - v3.3 error: `failed to resolve https://s1.test:2380 to match --initial-cluster=s1=https://s1.test:2380 (failed to resolve "https://s1.test:2380" (error ...))`.
-
-### Breaking Changes
-
-- Require [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4) or [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5).
- - Deprecate [`metadata.Incoming/OutgoingContext`](https://github.com/etcd-io/etcd/pull/7896).
- - Deprecate `grpclog.Logger`, upgrade to [`grpclog.LoggerV2`](https://github.com/etcd-io/etcd/pull/8533).
- - Deprecate [`grpc.ErrClientConnTimeout`](https://github.com/etcd-io/etcd/pull/8505) errors in `clientv3`.
- - Use [`MaxRecvMsgSize` and `MaxSendMsgSize`](https://github.com/etcd-io/etcd/pull/8437) to limit message size, in etcd server.
-- Translate [gRPC status error in v3 client `Snapshot` API](https://github.com/etcd-io/etcd/pull/9038).
-- v3 `etcdctl` [`lease timetolive LEASE_ID`](https://github.com/etcd-io/etcd/issues/9028) on expired lease now prints [`"lease LEASE_ID already expired"`](https://github.com/etcd-io/etcd/pull/9047).
- - <=3.2 prints `"lease LEASE_ID granted with TTL(0s), remaining(-1s)"`.
-- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3alpha` with [`/v3beta`](https://github.com/etcd-io/etcd/pull/8880).
- - To deprecate [`/v3alpha`](https://github.com/etcd-io/etcd/issues/8125) in v3.4.
- - In v3.3, `curl -L http://localhost:2379/v3alpha/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback to `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'`, but `curl -L http://localhost:2379/v3alpha/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` won't work in v3.4. Use `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-- Change `etcd --auto-compaction-retention` flag to [accept string values](https://github.com/etcd-io/etcd/pull/8563) with [finer granularity](https://github.com/etcd-io/etcd/issues/8503).
- - Now that `etcd --auto-compaction-retention` accepts string values, etcd configuration YAML file `auto-compaction-retention` field must be changed to `string` type.
- - Previously, `--config-file etcd.config.yaml` could have the `auto-compaction-retention: 24` field; now it must be `auto-compaction-retention: "24"` or `auto-compaction-retention: "24h"`.
- - If configured as `etcd --auto-compaction-mode periodic --auto-compaction-retention "24h"`, the time duration value for `etcd --auto-compaction-retention` flag must be valid for [`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration) function in Go.
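-
-For programmatic configuration, a sketch of the equivalent `embed` settings; the field names and string-typed retention mirror the flag change above, the data directory is a placeholder, and the import path assumes the `go.etcd.io/etcd` module layout.
-
-```go
-// Hedged sketch: configure periodic auto-compaction with a string-valued
-// retention, mirroring --auto-compaction-mode/--auto-compaction-retention.
-package main
-
-import (
-	"log"
-
-	"go.etcd.io/etcd/embed"
-)
-
-func main() {
-	cfg := embed.NewConfig()
-	cfg.Dir = "default.etcd"
-	cfg.AutoCompactionMode = "periodic"
-	cfg.AutoCompactionRetention = "24h" // a string value, e.g. "24" or "24h"
-
-	e, err := embed.StartEtcd(cfg)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer e.Close()
-	<-e.Server.ReadyNotify()
-	log.Println("embedded etcd running with periodic auto-compaction")
-}
-```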
-
-### Dependency
-
-- Upgrade [`boltdb/bolt`](https://github.com/boltdb/bolt#project-status) from [**`v1.3.0`**](https://github.com/boltdb/bolt/releases/tag/v1.3.0) to [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) [**`v1.3.1-coreos.6`**](https://github.com/coreos/bbolt/releases/tag/v1.3.1-coreos.6).
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.2.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.2.1) to [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5).
-- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) to [**`v1.1`**](https://github.com/ugorji/go/releases/tag/v1.1), and [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/8721).
-- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) to [**`ugorji/go@54210f4e0`**](https://github.com/ugorji/go/commit/54210f4e076c57f351166f0ed60e67d3fca57a36), and [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/8574).
-- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.2.2`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.2.2) to [**`v1.3.0`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.0).
-- Upgrade [`golang.org/x/crypto/bcrypt`](https://github.com/golang/crypto) to [**`golang/crypto@6c586e17d`**](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://github.com/etcd-io/etcd/tree/main/Documentation/metrics) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd --listen-metrics-urls`](https://github.com/etcd-io/etcd/pull/8242) flag for additional `/metrics` and `/health` endpoints.
- - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/etcd-io/etcd/issues/8060).
-- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric.
- - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948).
-- Add [`etcd_debugging_mvcc_db_compaction_keys_total`](https://github.com/etcd-io/etcd/pull/8280) Prometheus metric.
-- Add [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/8064) Prometheus metric.
- - To improve [lease revoke monitoring](https://github.com/etcd-io/etcd/issues/8050).
-- Document [Prometheus 2.0 rules](https://github.com/etcd-io/etcd/pull/8879).
-- Initialize gRPC server [metrics with zero values](https://github.com/etcd-io/etcd/pull/8878).
-- Fix [range/put/delete operation metrics](https://github.com/etcd-io/etcd/pull/8054) with transaction.
- - `etcd_debugging_mvcc_range_total`
- - `etcd_debugging_mvcc_put_total`
- - `etcd_debugging_mvcc_delete_total`
- - `etcd_debugging_mvcc_txn_total`
-- Fix [`etcd_debugging_mvcc_keys_total`](https://github.com/etcd-io/etcd/pull/8390) on restore.
-- Fix [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/8120) on restore.
- - Also change to [`prometheus.NewGaugeFunc`](https://github.com/etcd-io/etcd/pull/8150).
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- Add [CRL based connection rejection](https://github.com/etcd-io/etcd/pull/8124) to manage [revoked certs](https://github.com/etcd-io/etcd/issues/4034).
-- Document [TLS authentication changes](https://github.com/etcd-io/etcd/pull/8895).
- - [Server accepts connections if IP matches, without checking DNS entries](https://github.com/etcd-io/etcd/pull/8223). For instance, if peer cert contains IP addresses and DNS names in Subject Alternative Name (SAN) field, and the remote IP address matches one of those IP addresses, server just accepts connection without further checking the DNS names.
- - [Server supports reverse-lookup on wildcard DNS `SAN`](https://github.com/etcd-io/etcd/pull/8281). For instance, if peer cert contains only DNS names (no IP addresses) in Subject Alternative Name (SAN) field, server first reverse-lookups the remote IP address to get a list of names mapping to that address (e.g. `nslookup IPADDR`). Then accepts the connection if those names have a matching name with peer cert's DNS names (either by exact or wildcard match). If none is matched, server forward-lookups each DNS entry in peer cert (e.g. look up `example.default.svc` when the entry is `*.example.default.svc`), and accepts connection only when the host's resolved addresses have the matching IP address with the peer's remote IP address.
-- Add [`etcd --peer-cert-allowed-cn`](https://github.com/etcd-io/etcd/pull/8616) flag.
- - To support [CommonName(CN) based auth](https://github.com/etcd-io/etcd/issues/8262) for inter peer connection.
-- [Swap priority](https://github.com/etcd-io/etcd/pull/8594) of cert CommonName(CN) and username + password.
- - To address ["username and password specified in the request should take priority over CN in the cert"](https://github.com/etcd-io/etcd/issues/8584).
-- Protect [lease revoke with auth](https://github.com/etcd-io/etcd/pull/8031).
-- Provide user's role on [auth permission error](https://github.com/etcd-io/etcd/pull/8164).
-- Fix [auth store panic with disabled token](https://github.com/etcd-io/etcd/pull/8695).
-
-### etcd server
-
-- Add [`etcd --experimental-initial-corrupt-check`](https://github.com/etcd-io/etcd/pull/8554) flag to [check cluster database hashes before serving client/peer traffic](https://github.com/etcd-io/etcd/issues/8313).
- - `etcd --experimental-initial-corrupt-check=false` by default.
- - v3.4 will enable `--initial-corrupt-check=true` by default.
-- Add [`etcd --experimental-corrupt-check-time`](https://github.com/etcd-io/etcd/pull/8420) flag to [raise corrupt alarm monitoring](https://github.com/etcd-io/etcd/issues/7125).
- - `etcd --experimental-corrupt-check-time=0s` disabled by default.
-- Add [`etcd --experimental-enable-v2v3`](https://github.com/etcd-io/etcd/pull/8407) flag to [emulate v2 API with v3](https://github.com/etcd-io/etcd/issues/6925).
- - `etcd --experimental-enable-v2v3=false` by default.
-- Add [`etcd --max-txn-ops`](https://github.com/etcd-io/etcd/pull/7976) flag to [configure maximum number operations in transaction](https://github.com/etcd-io/etcd/issues/7826).
-- Add [`etcd --max-request-bytes`](https://github.com/etcd-io/etcd/pull/7968) flag to [configure maximum client request size](https://github.com/etcd-io/etcd/issues/7923).
- - If not configured, it defaults to 1.5 MiB.
-- Add [`etcd --client-crl-file`, `--peer-crl-file`](https://github.com/etcd-io/etcd/pull/8124) flags for [Certificate revocation list](https://github.com/etcd-io/etcd/issues/4034).
-- Add [`etcd --peer-cert-allowed-cn`](https://github.com/etcd-io/etcd/pull/8616) flag to support [CN-based auth for inter-peer connection](https://github.com/etcd-io/etcd/issues/8262).
-- Add [`etcd --listen-metrics-urls`](https://github.com/etcd-io/etcd/pull/8242) flag for additional `/metrics` and `/health` endpoints.
- - Support [additional (non) TLS `/metrics` endpoints for a TLS-enabled cluster](https://github.com/etcd-io/etcd/pull/8282).
- - e.g. `etcd --listen-metrics-urls=https://localhost:2378,http://localhost:9379` to serve `/metrics` and `/health` on secure port 2378 and insecure port 9379.
- - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/etcd-io/etcd/issues/8060).
-- Add [`etcd --auto-compaction-mode`](https://github.com/etcd-io/etcd/pull/8123) flag to [support revision-based compaction](https://github.com/etcd-io/etcd/issues/8098).
-- Change `etcd --auto-compaction-retention` flag to [accept string values](https://github.com/etcd-io/etcd/pull/8563) with [finer granularity](https://github.com/etcd-io/etcd/issues/8503).
- - Now that `etcd --auto-compaction-retention` accepts string values, the `auto-compaction-retention` field in the etcd configuration YAML file must be changed to `string` type.
- - Previously, `etcd --config-file etcd.config.yaml` could have an `auto-compaction-retention: 24` field; now it must be `auto-compaction-retention: "24"` or `auto-compaction-retention: "24h"`.
- - If configured as `--auto-compaction-mode periodic --auto-compaction-retention "24h"`, the duration value for the `etcd --auto-compaction-retention` flag must be valid for Go's [`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration) function.
- - e.g. `etcd --auto-compaction-mode=revision --auto-compaction-retention=1000` automatically runs `Compact` on `"latest revision" - 1000` every 5 minutes (when the latest revision is 30000, compact on revision 29000).
- - e.g. `etcd --auto-compaction-mode=periodic --auto-compaction-retention=72h` automatically runs `Compact` with a 72-hour retention window every 7.2 hours.
- - e.g. `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` automatically runs `Compact` with a 30-minute retention window every 3 minutes.
- - The periodic compactor keeps recording the latest revisions every 1/10 of the given compaction period (e.g. every 1 hour when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=10h`).
- - Every 1/10 of the given compaction period, the compactor discards historical data using the last revision fetched one compaction period earlier.
- - The retention window thus moves forward by 1/10 of the given compaction period at a time.
- - For instance, when hourly writes are 100 and `--auto-compaction-retention=10`, v3.1 compacts revisions 1000, 2000, and 3000 every 10 hours, while v3.2.x, v3.3.0, v3.3.1, and v3.3.2 compact revisions 1000, 1100, and 1200 every hour. Furthermore, when writes per minute are 1000, v3.3.0, v3.3.1, and v3.3.2 with `--auto-compaction-mode=periodic --auto-compaction-retention=30m` compact revisions 30000, 33000, and 36000 every 3 minutes, with finer granularity.
- - Whether or not compaction succeeds, this process repeats every 1/10 of the given compaction period. If compaction succeeds, it simply removes the compacted revision from the historical revision records.
-- Add [`etcd --grpc-keepalive-min-time`, `etcd --grpc-keepalive-interval`, `etcd --grpc-keepalive-timeout`](https://github.com/etcd-io/etcd/pull/8535) flags to configure server-side keepalive policies.
-- Serve [`/health` endpoint as unhealthy](https://github.com/etcd-io/etcd/pull/8272) when [alarm (e.g. `NOSPACE`) is raised or there's no leader](https://github.com/etcd-io/etcd/issues/8207).
- - Define [`etcdhttp.Health`](https://godoc.org/github.com/coreos/etcd/etcdserver/api/etcdhttp#Health) struct with JSON encoder.
- - Note that `"health"` field is [`string` type, not `bool`](https://github.com/etcd-io/etcd/pull/9143).
- - e.g. `{"health":"false"}`, `{"health":"true"}`
- - [Remove `"errors"` field](https://github.com/etcd-io/etcd/pull/9162) since `v3.3.0-rc.3` (did exist only in `v3.3.0-rc.0`, `v3.3.0-rc.1`, `v3.3.0-rc.2`).
-- Move [logging setup to embed package](https://github.com/etcd-io/etcd/pull/8810)
- - Disable gRPC server info-level logs by default (can be enabled with `etcd --debug` flag).
-- Use [monotonic time in Go 1.9](https://github.com/etcd-io/etcd/pull/8507) for `lease` package.
-- Warn on [empty hosts in advertise URLs](https://github.com/etcd-io/etcd/pull/8384).
- - Address [advertise client URLs accepts empty hosts](https://github.com/etcd-io/etcd/issues/8379).
- - etcd v3.4 will exit on this error.
- - e.g. `etcd --advertise-client-urls=http://:2379`.
-- Warn on [shadowed environment variables](https://github.com/etcd-io/etcd/pull/8385).
- - Address [error on shadowed environment variables](https://github.com/etcd-io/etcd/issues/8380).
- - etcd v3.4 will exit on this error.
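-
-A minimal health-probe sketch in Go for the `/health` endpoint described above; the plain-HTTP listener on `127.0.0.1:2379` is an illustrative assumption, and note that `"health"` is a JSON string rather than a bool:
-
-```go
-// healthprobe.go: minimal sketch of checking etcd's /health endpoint.
-// The local, non-TLS member on 127.0.0.1:2379 is an assumption for illustration.
-package main
-
-import (
-    "encoding/json"
-    "fmt"
-    "log"
-    "net/http"
-)
-
-// health mirrors the JSON shape above: the "health" field is a string, not a bool.
-type health struct {
-    Health string `json:"health"`
-}
-
-func main() {
-    resp, err := http.Get("http://127.0.0.1:2379/health")
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer resp.Body.Close()
-
-    var h health
-    if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
-        log.Fatal(err)
-    }
-    // e.g. {"health":"true"} when the member is healthy and has a leader.
-    fmt.Printf("healthy=%v (HTTP %d)\n", h.Health == "true", resp.StatusCode)
-}
-```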
-
-### API
-
-- Support [ranges in transaction comparisons](https://github.com/etcd-io/etcd/pull/8025) for [disconnected linearized reads](https://github.com/etcd-io/etcd/issues/7924).
-- Add [nested transactions](https://github.com/etcd-io/etcd/pull/8102) to extend [proxy use cases](https://github.com/etcd-io/etcd/issues/7857) (see the sketch after this list).
-- Add [lease comparison target in transaction](https://github.com/etcd-io/etcd/pull/8324).
-- Add [lease list](https://github.com/etcd-io/etcd/pull/8358).
-- Add [hash by revision](https://github.com/etcd-io/etcd/pull/8263) for [better corruption checking against boltdb](https://github.com/etcd-io/etcd/issues/8016).
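-
-A rough clientv3 sketch of the range comparison and nested transaction features above; the endpoint and the `job/*` keys are illustrative assumptions:
-
-```go
-// txn_sketch.go: illustrative sketch of a range comparison and a nested transaction.
-// Endpoint and the job/* keys are assumptions for illustration.
-package main
-
-import (
-    "context"
-    "log"
-    "time"
-
-    "go.etcd.io/etcd/clientv3" // later module path; older releases used github.com/coreos/etcd/clientv3
-)
-
-func main() {
-    cli, err := clientv3.New(clientv3.Config{
-        Endpoints:   []string{"127.0.0.1:2379"},
-        DialTimeout: 5 * time.Second,
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer cli.Close()
-
-    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-    defer cancel()
-
-    // Range comparison: every key in ["job/", "job0") must exist (version > 0).
-    inRange := clientv3.Compare(clientv3.Version("job/"), ">", 0).WithRange("job0")
-
-    // Nested transaction: OpTxn embeds another If/Then/Else inside the Then branch.
-    nested := clientv3.OpTxn(
-        []clientv3.Cmp{clientv3.Compare(clientv3.Value("job/state"), "=", "pending")},
-        []clientv3.Op{clientv3.OpPut("job/state", "running")},
-        []clientv3.Op{clientv3.OpGet("job/state")},
-    )
-
-    resp, err := cli.Txn(ctx).If(inRange).Then(nested).Commit()
-    if err != nil {
-        log.Fatal(err)
-    }
-    log.Printf("outer txn succeeded: %v", resp.Succeeded)
-}
-```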
-
-### client v3
-
-- Add [health balancer](https://github.com/etcd-io/etcd/pull/8545) to fix [watch API hangs](https://github.com/etcd-io/etcd/issues/7247), improve [endpoint switch under network faults](https://github.com/etcd-io/etcd/issues/7941).
-- [Refactor balancer](https://github.com/etcd-io/etcd/pull/8840) and add [client-side keepalive pings](https://github.com/etcd-io/etcd/pull/8199) to handle [network partitions](https://github.com/etcd-io/etcd/issues/8711).
-- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/coreos/etcd/clientv3#Config).
- - Fix [exceeded response size limit error in client-side](https://github.com/etcd-io/etcd/issues/9043).
- - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099).
- - In previous versions (v3.2.10, v3.2.11), the client response size was limited to only 4 MiB.
- - `MaxCallSendMsgSize` defaults to 2 MiB, if not configured.
- - `MaxCallRecvMsgSize` defaults to `math.MaxInt32`, if not configured (see the sketch after this list).
-- Accept [`Compare_LEASE`](https://github.com/etcd-io/etcd/pull/8324) in [`clientv3.Compare`](https://godoc.org/github.com/coreos/etcd/clientv3#Compare).
-- Add [`LeaseValue` helper](https://github.com/etcd-io/etcd/pull/8488) to compare (`Cmp`) `LeaseID` values in `Txn`.
-- Add [`MoveLeader`](https://github.com/etcd-io/etcd/pull/8153) to `Maintenance`.
-- Add [`HashKV`](https://github.com/etcd-io/etcd/pull/8351) to `Maintenance`.
-- Add [`Leases`](https://github.com/etcd-io/etcd/pull/8358) to `Lease`.
-- Add [`clientv3/ordering`](https://github.com/etcd-io/etcd/pull/8092) to enforce [ordering in serialized requests](https://github.com/etcd-io/etcd/issues/7623).
-- Fix ["put at-most-once" violation](https://github.com/etcd-io/etcd/pull/8335).
-- Fix [`WatchResponse.Canceled`](https://github.com/etcd-io/etcd/pull/8283) on [compacted watch request](https://github.com/etcd-io/etcd/issues/8231).
-- Fix [`concurrency/stm` `Put` with serializable snapshot](https://github.com/etcd-io/etcd/pull/8439).
- - Use store revision from first fetch to resolve write conflicts instead of modified revision.
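-
-A rough sketch of the new `clientv3.Config` message-size fields and the `LeaseValue` comparison helper; the endpoint, key, and chosen sizes are illustrative assumptions:
-
-```go
-// client_limits_sketch.go: sketch of the MaxCallSendMsgSize/MaxCallRecvMsgSize fields
-// and the LeaseValue comparison helper. Endpoint, key, and sizes are illustrative assumptions.
-package main
-
-import (
-    "context"
-    "log"
-    "time"
-
-    "go.etcd.io/etcd/clientv3" // later module path; older releases used github.com/coreos/etcd/clientv3
-)
-
-func main() {
-    cli, err := clientv3.New(clientv3.Config{
-        Endpoints:          []string{"127.0.0.1:2379"},
-        DialTimeout:        5 * time.Second,
-        MaxCallSendMsgSize: 2 * 1024 * 1024,  // 2 MiB request limit (the default when unset)
-        MaxCallRecvMsgSize: 16 * 1024 * 1024, // raise the response limit above the old 4 MiB cap
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer cli.Close()
-
-    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-    defer cancel()
-
-    // Attach a key to a lease, then put again only while that same lease still owns the key.
-    lease, err := cli.Grant(ctx, 60)
-    if err != nil {
-        log.Fatal(err)
-    }
-    if _, err := cli.Put(ctx, "cfg/owner", "node-1", clientv3.WithLease(lease.ID)); err != nil {
-        log.Fatal(err)
-    }
-
-    resp, err := cli.Txn(ctx).
-        If(clientv3.Compare(clientv3.LeaseValue("cfg/owner"), "=", lease.ID)).
-        Then(clientv3.OpPut("cfg/owner", "node-1", clientv3.WithLease(lease.ID))).
-        Commit()
-    if err != nil {
-        log.Fatal(err)
-    }
-    log.Printf("lease still attached to key: %v", resp.Succeeded)
-}
-```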
-
-### etcdctl v3
-
-- Add [`etcdctl --discovery-srv`](https://github.com/etcd-io/etcd/pull/8462) flag.
-- Add [`etcdctl --keepalive-time`, `--keepalive-timeout`](https://github.com/etcd-io/etcd/pull/8663) flags.
-- Add [`etcdctl lease list`](https://github.com/etcd-io/etcd/pull/8358) command.
-- Add [`etcdctl lease keep-alive --once`](https://github.com/etcd-io/etcd/pull/8775) flag.
-- Make [`lease timetolive LEASE_ID`](https://github.com/etcd-io/etcd/issues/9028) on expired lease print [`lease LEASE_ID already expired`](https://github.com/etcd-io/etcd/pull/9047).
- - <=3.2 prints `lease LEASE_ID granted with TTL(0s), remaining(-1s)`.
-- Add [`etcdctl snapshot restore --wal-dir`](https://github.com/etcd-io/etcd/pull/9124) flag.
-- Add [`etcdctl defrag --data-dir`](https://github.com/etcd-io/etcd/pull/8367) flag.
-- Add [`etcdctl move-leader`](https://github.com/etcd-io/etcd/pull/8153) command.
-- Add [`etcdctl endpoint hashkv`](https://github.com/etcd-io/etcd/pull/8351) command.
-- Add [`etcdctl endpoint --cluster`](https://github.com/etcd-io/etcd/pull/8143) flag, equivalent to [v2 `etcdctl cluster-health`](https://github.com/etcd-io/etcd/issues/8117).
-- Make `etcdctl endpoint health` command terminate with [non-zero exit code on unhealthy status](https://github.com/etcd-io/etcd/pull/8342).
-- Add [`etcdctl lock --ttl`](https://github.com/etcd-io/etcd/pull/8370) flag.
-- Support [`etcdctl watch [key] [range_end] -- [exec-command…]`](https://github.com/etcd-io/etcd/pull/8919), equivalent to [v2 `etcdctl exec-watch`](https://github.com/etcd-io/etcd/issues/8814).
- - Make `etcdctl watch -- [exec-command]` set environmental variables [`ETCD_WATCH_REVISION`, `ETCD_WATCH_EVENT_TYPE`, `ETCD_WATCH_KEY`, `ETCD_WATCH_VALUE`](https://github.com/etcd-io/etcd/pull/9142) for each event.
-- Support [`etcdctl watch` with environmental variables `ETCDCTL_WATCH_KEY` and `ETCDCTL_WATCH_RANGE_END`](https://github.com/etcd-io/etcd/pull/9142).
-- Enable [`clientv3.WithRequireLeader(context.Context)` for `watch`](https://github.com/etcd-io/etcd/pull/8672) command.
-- Print [`"del"` instead of `"delete"`](https://github.com/etcd-io/etcd/pull/8297) in `txn` interactive mode.
-- Print [`ETCD_INITIAL_ADVERTISE_PEER_URLS` in `member add`](https://github.com/etcd-io/etcd/pull/8332).
-- Fix [`etcdctl snapshot status` to not modify snapshot file](https://github.com/etcd-io/etcd/pull/8815).
- - For example, start etcd `v3.3.10`
- - Write some data
- - Use etcdctl `v3.3.10` to save snapshot
- - Upgrading Kubernetes fails for some reason, so the cluster is rolled back to the previous etcd version `v3.2.24`
- - Run etcdctl `v3.2.24` `snapshot status` against the snapshot file saved from the `v3.3.10` server
- - Running etcdctl `v3.2.24` `snapshot restore` then fails with `"expected sha256 [12..."`
-- Handle [empty key permission](https://github.com/etcd-io/etcd/pull/8514) in `etcdctl`.
-
-### etcdctl v2
-
-- Add [`etcdctl backup --with-v3`](https://github.com/etcd-io/etcd/pull/8479) flag.
-
-### gRPC Proxy
-
-- Add [`grpc-proxy start --experimental-leasing-prefix`](https://github.com/etcd-io/etcd/pull/8341) flag.
- - For disconnected linearized reads.
- - Based on [V system leasing](https://github.com/etcd-io/etcd/issues/6065).
- - See ["Disconnected consistent reads with etcd" blog post](https://coreos.com/blog/coreos-labs-disconnected-consistent-reads-with-etcd).
-- Add [`grpc-proxy start --experimental-serializable-ordering`](https://github.com/etcd-io/etcd/pull/8315) flag.
- - To ensure serializable reads have monotonically increasing store revisions across endpoints.
-- Add [`grpc-proxy start --metrics-addr`](https://github.com/etcd-io/etcd/pull/8242) flag for an additional `/metrics` endpoint.
- - Set `--metrics-addr=http://[HOST]:9379` to serve `/metrics` on insecure port 9379.
-- Serve [`/health` endpoint in grpc-proxy](https://github.com/etcd-io/etcd/pull/8322).
-- Add [`grpc-proxy start --debug`](https://github.com/etcd-io/etcd/pull/8994) flag.
-- Add [`grpc-proxy start --max-send-bytes`](https://github.com/etcd-io/etcd/pull/9250) flag to [configure maximum client request size](https://github.com/etcd-io/etcd/issues/7923).
-- Add [`grpc-proxy start --max-recv-bytes`](https://github.com/etcd-io/etcd/pull/9250) flag to [configure maximum client request size](https://github.com/etcd-io/etcd/issues/7923).
-- Fix [Snapshot API error handling](https://github.com/etcd-io/etcd/commit/dbd16d52fbf81e5fd806d21ff5e9148d5bf203ab).
-- Fix [KV API `PrevKv` flag handling](https://github.com/etcd-io/etcd/pull/8366).
-- Fix [KV API `KeysOnly` flag handling](https://github.com/etcd-io/etcd/pull/8552).
-
-### gRPC gateway
-
-- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3alpha` with [`/v3beta`](https://github.com/etcd-io/etcd/pull/8880).
- - To deprecate [`/v3alpha`](https://github.com/etcd-io/etcd/issues/8125) in v3.4.
- - In v3.3, `curl -L http://localhost:2379/v3alpha/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback to the `/v3beta` path, but it won't work in v3.4. Use `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead (see the sketch after this list).
-- Support ["authorization" token](https://github.com/etcd-io/etcd/pull/7999).
-- Support [websocket for bi-directional streams](https://github.com/etcd-io/etcd/pull/8257).
- - Fix [`Watch` API with gRPC gateway](https://github.com/etcd-io/etcd/issues/8237).
-- Upgrade gRPC gateway to [v1.3.0](https://github.com/etcd-io/etcd/issues/8838).
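-
-A Go sketch equivalent to the `/v3beta` curl example above; the endpoint and the base64-encoded `foo`/`bar` key-value are illustrative assumptions:
-
-```go
-// gateway_put_sketch.go: JSON gateway put over /v3beta, equivalent to the curl example above.
-// Endpoint and the base64-encoded "foo"/"bar" pair are assumptions for illustration.
-package main
-
-import (
-    "bytes"
-    "encoding/base64"
-    "encoding/json"
-    "fmt"
-    "io/ioutil"
-    "log"
-    "net/http"
-)
-
-func main() {
-    // The gateway expects base64-encoded key and value ("foo" -> "Zm9v", "bar" -> "YmFy").
-    body, err := json.Marshal(map[string]string{
-        "key":   base64.StdEncoding.EncodeToString([]byte("foo")),
-        "value": base64.StdEncoding.EncodeToString([]byte("bar")),
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-
-    resp, err := http.Post("http://127.0.0.1:2379/v3beta/kv/put", "application/json", bytes.NewReader(body))
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer resp.Body.Close()
-
-    out, _ := ioutil.ReadAll(resp.Body)
-    fmt.Printf("HTTP %d: %s\n", resp.StatusCode, out)
-}
-```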
-
-### etcd server
-
-- Fix [backend database in-memory index corruption](https://github.com/etcd-io/etcd/pull/8127) issue on restore (only 3.2.0 is affected).
-- Fix [watch restore from snapshot](https://github.com/etcd-io/etcd/pull/8427).
-- Fix [`mvcc/backend.defragdb` nil-pointer dereference on create bucket failure](https://github.com/etcd-io/etcd/pull/9119).
-- Fix [server crash](https://github.com/etcd-io/etcd/pull/8010) on [invalid transaction request from gRPC gateway](https://github.com/etcd-io/etcd/issues/7889).
-- Prevent [server panic from member update/add](https://github.com/etcd-io/etcd/pull/9174) with [wrong scheme URLs](https://github.com/etcd-io/etcd/issues/9173).
-- Make [peer dial timeout longer](https://github.com/etcd-io/etcd/pull/8599).
- - See [coreos/etcd-operator#1300](https://github.com/etcd-io/etcd-operator/issues/1300) for more detail.
-- Make server [wait up to request time-out](https://github.com/etcd-io/etcd/pull/8267) with [pending RPCs](https://github.com/etcd-io/etcd/issues/8224).
-- Fix [`grpc.Server` panic on `GracefulStop`](https://github.com/etcd-io/etcd/pull/8987) with [TLS-enabled server](https://github.com/etcd-io/etcd/issues/8916).
-- Fix ["multiple peer URLs cannot start" issue](https://github.com/etcd-io/etcd/issues/8383).
-- Fix server-side auth so [concurrent auth operations do not return old revision error](https://github.com/etcd-io/etcd/pull/8442).
-- Handle [WAL renaming failure on Windows](https://github.com/etcd-io/etcd/pull/8286).
-- Upgrade [`coreos/go-systemd`](https://github.com/coreos/go-systemd/releases) to `v15` (see https://github.com/coreos/go-systemd/releases/tag/v15).
-- [Put back `/v2/machines`](https://github.com/etcd-io/etcd/pull/8062) endpoint for python-etcd wrapper.
-
-### client v2
-
-- [Fail-over v2 client](https://github.com/etcd-io/etcd/pull/8519) to next endpoint on [oneshot failure](https://github.com/etcd-io/etcd/issues/8515).
-
-### Package `raft`
-
-- Add [non-voting member](https://github.com/etcd-io/etcd/pull/8751).
- - To implement [Raft thesis 4.2.1 Catching up new servers](https://github.com/etcd-io/etcd/issues/8568).
- - `Learner` node does not vote or promote itself.
-
-### Other
-
-- Support previous two minor versions (see our [new release policy](https://github.com/etcd-io/etcd/pull/8805)).
-- `v3.3.x` is the last release cycle that supports `ACI`.
- - [AppC was officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016.
- - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore.
- - `*.aci` files won't be available from etcd v3.4 release.
-- Add container registry [`gcr.io/etcd-development/etcd`](https://gcr.io/etcd-development/etcd).
- - [quay.io/coreos/etcd](https://quay.io/coreos/etcd) is still supported as secondary.
-
-### Go
-
-- Require [*Go 1.9+*](https://github.com/etcd-io/etcd/issues/6174).
-- Compile with [*Go 1.9.3*](https://golang.org/doc/devel/release.html#go1.9).
-- Deprecate [`golang.org/x/net/context`](https://github.com/etcd-io/etcd/pull/8511).
-
-
-
-
diff --git a/CHANGELOG/CHANGELOG-3.4.md b/CHANGELOG/CHANGELOG-3.4.md
deleted file mode 100644
index 77caa2bfb73..00000000000
--- a/CHANGELOG/CHANGELOG-3.4.md
+++ /dev/null
@@ -1,1199 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.3](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.3.md).
-
-
-
-## v3.4.24 (TBD)
-
-### etcd server
-- Fix [etcdserver might promote a non-started learner](https://github.com/etcd-io/etcd/pull/15097).
-- Improve [mvcc: reduce count-only range overhead](https://github.com/etcd-io/etcd/pull/15099) (see the count-only sketch after this list).
-- Improve [mvcc: push down RangeOptions.limit argv into index tree to reduce memory overhead](https://github.com/etcd-io/etcd/pull/15137).
-- Improve [server: set multiple concurrentReadTx instances share one txReadBuffer](https://github.com/etcd-io/etcd/pull/15195).
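-
-A rough clientv3 sketch of the kind of count-only range request the mvcc improvement above targets; the endpoint and prefix are illustrative assumptions:
-
-```go
-// countonly_sketch.go: the kind of count-only range the mvcc change above optimizes.
-// Endpoint and prefix are assumptions for illustration.
-package main
-
-import (
-    "context"
-    "log"
-    "time"
-
-    "go.etcd.io/etcd/clientv3"
-)
-
-func main() {
-    cli, err := clientv3.New(clientv3.Config{
-        Endpoints:   []string{"127.0.0.1:2379"},
-        DialTimeout: 5 * time.Second,
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer cli.Close()
-
-    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-    defer cancel()
-
-    // CountOnly asks the server for the number of keys under the prefix
-    // without returning the key-value pairs themselves.
-    resp, err := cli.Get(ctx, "registry/", clientv3.WithPrefix(), clientv3.WithCountOnly())
-    if err != nil {
-        log.Fatal(err)
-    }
-    log.Printf("keys under registry/: %d", resp.Count)
-}
-```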
-
-### Dependency
-- Upgrade [github.com/grpc-ecosystem/grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [v1.9.5](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.9.5) to [v1.11.0](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.11.0).
-- Bump bbolt to [v1.3.7](https://github.com/etcd-io/etcd/pull/15223).
-
-### Other
-- Updated [base image from base-debian11 to static-debian11 and removed dependency on busybox](https://github.com/etcd-io/etcd/pull/15038).
-
-### Package `netutil`
-- Fix [consistently format IPv6 addresses for comparison](https://github.com/etcd-io/etcd/pull/15188)
-
-
-
-## v3.4.23 (2022-12-21)
-
-### Package `clientv3`
-- Fix [Refreshing token on CommonName based authentication causes segmentation violation in client](https://github.com/etcd-io/etcd/pull/14792).
-
-### etcd server
-- Fix [Remove memberID from data corrupt alarm](https://github.com/etcd-io/etcd/pull/14853).
-- Fix [nil pointer panic for readonly txn due to nil response](https://github.com/etcd-io/etcd/pull/14900).
-
-### Security
-- Use [distroless base image](https://github.com/etcd-io/etcd/pull/15017) to address critical Vulnerabilities.
-- Bumped [some dependencies](https://github.com/etcd-io/etcd/pull/15019) to address some HIGH Vulnerabilities.
-
-### Go
-- Require [Go 1.17+](https://github.com/etcd-io/etcd/pull/15019).
-- Compile with [Go 1.17+](https://go.dev/doc/devel/release#go1.17)
-
-
-
-## v3.4.22 (2022-11-02)
-
-### etcd server
-- Fix [memberID equals zero in corruption alarm](https://github.com/etcd-io/etcd/pull/14530)
-- Fix [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14548)
-- Fix [avoid closing a watch with ID 0 incorrectly](https://github.com/etcd-io/etcd/pull/14562)
-- Fix [auth: fix data consistency issue caused by recovery from snapshot](https://github.com/etcd-io/etcd/pull/14649)
-
-### Package `netutil`
-- Fix [netutil: add url comparison without resolver to URLStringsEqual](https://github.com/etcd-io/etcd/pull/14577)
-
-### Package `clientv3`
-- Fix [Add backoff before retry when watch stream returns unavailable](https://github.com/etcd-io/etcd/pull/14581).
-
-### etcd grpc-proxy
-- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14601) flag to support adding configurable cipher list.
-
-
-
-## v3.4.21 (2022-09-15)
-
-### etcd server
-- Fix [Durability API guarantee broken in single node cluster](https://github.com/etcd-io/etcd/pull/14423)
-- Fix [Panic due to nil log object](https://github.com/etcd-io/etcd/pull/14420)
-- Fix [authentication data not loaded on member startup](https://github.com/etcd-io/etcd/pull/14410)
-
-### etcdctl v3
-
-- Fix [etcdctl move-leader may fail for multiple endpoints](https://github.com/etcd-io/etcd/pull/14441)
-
-
-
-## v3.4.20 (2022-08-06)
-
-### Package `clientv3`
-
-- Fix [filter learners members during autosync](https://github.com/etcd-io/etcd/pull/14236).
-
-### etcd server
-- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14251) flag to configure the max concurrent streams each client can open at a time; it defaults to `math.MaxUint32`.
-- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/14253) flag to enable checkpoint persisting.
-- Fix [Lease checkpoints don't prevent to reset ttl on leader change](https://github.com/etcd-io/etcd/pull/14253), requires enabling checkpoint persisting.
-- Fix [Protect rangePermCache with a RW lock correctly](https://github.com/etcd-io/etcd/pull/14230)
-- Fix [raft: postpone MsgReadIndex until first commit in the term](https://github.com/etcd-io/etcd/pull/14258)
-- Fix [etcdserver: resend ReadIndex request on empty apply request](https://github.com/etcd-io/etcd/pull/14269)
-- Fix [remove temp files in snap dir when etcdserver starting](https://github.com/etcd-io/etcd/pull/14246)
-- Fix [Etcdserver is still in progress of processing LeaseGrantRequest when it receives a LeaseKeepAliveRequest on the same leaseID](https://github.com/etcd-io/etcd/pull/14177)
-- Fix [Grant lease with negative ID can possibly cause db out of sync](https://github.com/etcd-io/etcd/pull/14239)
-- Fix [Allow non mutating requests pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/14254)
-
-
-
-## v3.4.19 (2022-07-12)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.18...v3.4.19) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### etcd server
-- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13475).
-- Fix [Defrag unsets backend options](https://github.com/etcd-io/etcd/pull/13713).
-- Fix [lease leak issue due to tokenProvider isn't enabled when restoring auth store from a snapshot](https://github.com/etcd-io/etcd/pull/13206).
-- Fix [the race condition between goroutine and channel on the same leases to be revoked](https://github.com/etcd-io/etcd/pull/14150).
-- Fix [lessor may continue to schedule checkpoint after stepping down leader role](https://github.com/etcd-io/etcd/pull/14150).
-
-### Package `clientv3`
-- Fix [a bug of not refreshing expired tokens](https://github.com/etcd-io/etcd/pull/13999).
-
-### Dependency
-- Upgrade [go.etcd.io/bbolt](https://github.com/etcd-io/bbolt/releases) from [v1.3.3](https://github.com/etcd-io/bbolt/releases/tag/v1.3.3) to [v1.3.6](https://github.com/etcd-io/bbolt/releases/tag/v1.3.6).
-
-### Security
-- Upgrade [golang.org/x/crypto](https://github.com/etcd-io/etcd/pull/14179) to v0.0.0-20220411220226-7b82a4e95df4 to address [CVE-2022-27191 ](https://github.com/advisories/GHSA-8c26-wmh5-6g9v).
-- Upgrade [gopkg.in/yaml.v2](https://github.com/etcd-io/etcd/pull/14192) to v2.4.0 to address [CVE-2019-11254](https://github.com/advisories/GHSA-wxc4-f4m6-wwqv).
-
-### Go
-- Require [Go 1.16+](https://github.com/etcd-io/etcd/pull/14136).
-- Compile with [Go 1.16+](https://go.dev/doc/devel/release#go1.16).
-- etcd uses [go modules](https://github.com/etcd-io/etcd/pull/14136) (instead of vendor dir) to track dependencies.
-
-
-
-## v3.4.18 (2021-10-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.17...v3.4.18) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13397).
-
-### Other
-
-- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs:
- - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption
- - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc
- - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp
- - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out of bound reads.
-
-
-
-## v3.4.17 (2021-10-03)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.16...v3.4.17) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### `etcdctl`
-
-- Fix [etcdctl check datascale command](https://github.com/etcd-io/etcd/pull/11896) to work with https endpoints.
-
-### gRPC gateway
-
-- Add [`MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/13077) support for http client.
-
-### Dependency
-
-- Replace [`github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`](https://github.com/etcd-io/etcd/pull/13378).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-## v3.4.16 (2021-05-11)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.15...v3.4.16) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### etcd server
-
-- Add [`--experimental-warning-apply-duration`](https://github.com/etcd-io/etcd/pull/12448) flag which allows apply duration threshold to be configurable.
-- Fix [`--unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/12751) to still write out data, avoiding corruption (most of the time).
-- Reduce [around 30% memory allocation by logging range response size without marshal](https://github.com/etcd-io/etcd/pull/12871).
-- Add [exclude alarms from health check conditionally](https://github.com/etcd-io/etcd/pull/12880).
-
-### Metrics
-
-- Fix [incorrect metrics generated when clients cancel watches](https://github.com/etcd-io/etcd/pull/12803) back-ported from (https://github.com/etcd-io/etcd/pull/12196).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.15](https://github.com/etcd-io/etcd/releases/tag/v3.4.15) (2021-02-26)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.14...v3.4.15) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### etcd server
-
-- Log [successful etcd server-side health check in debug level](https://github.com/etcd-io/etcd/pull/12677).
-- Fix [64 KB websocket notification message limit](https://github.com/etcd-io/etcd/pull/12402).
-
-### Package `fileutil`
-
-- Fix [`F_OFD_` constants](https://github.com/etcd-io/etcd/pull/12444).
-
-### Dependency
-
-- Bump up [`gorilla/websocket` to v1.4.2](https://github.com/etcd-io/etcd/pull/12645).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.14](https://github.com/etcd-io/etcd/releases/tag/v3.4.14) (2020-11-25)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.13...v3.4.14) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### Package `clientv3`
-
-- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready.
-
-### etcd server
-
-- [Fix server panic](https://github.com/etcd-io/etcd/pull/12288) when force-new-cluster flag is enabled in a cluster which had learner node.
-
-### Package `netutil`
-
-- Remove [`netutil.DropPort/RecoverPort/SetLatency/RemoveLatency`](https://github.com/etcd-io/etcd/pull/12491).
- - These are not used anymore. They were only used for older versions of functional testing.
- - Removed to adhere to best security practices, minimize arbitrary shell invocation.
-
-### `tools/etcd-dump-metrics`
-
-- Implement [input validation to prevent arbitrary shell invocation](https://github.com/etcd-io/etcd/pull/12491).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.13](https://github.com/etcd-io/etcd/releases/tag/v3.4.13) (2020-8-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.12...v3.4.13) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### Security
-
-- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd uses an existing directory whose permissions differ from 700 on Linux or 777 on Windows.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.12](https://github.com/etcd-io/etcd/releases/tag/v3.4.12) (2020-08-19)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.11...v3.4.12) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### etcd server
-
-- Fix [server panic in slow writes warnings](https://github.com/etcd-io/etcd/issues/12197).
- - Fixed via [PR#12238](https://github.com/etcd-io/etcd/pull/12238).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-
-
-## [v3.4.11](https://github.com/etcd-io/etcd/releases/tag/v3.4.11) (2020-08-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.10...v3.4.11) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### etcd server
-
-- Improve the [`runtime.FDUsage` call pattern to reduce object allocations, memory usage, and CPU usage](https://github.com/etcd-io/etcd/pull/11986).
-- Add [`etcd --experimental-watch-progress-notify-interval`](https://github.com/etcd-io/etcd/pull/12216) flag to make watch progress notify interval configurable.
-
-### Package `clientv3`
-
-- Remove [excessive watch cancel logging messages](https://github.com/etcd-io/etcd/pull/12187).
- - See [kubernetes/kubernetes#93450](https://github.com/kubernetes/kubernetes/issues/93450).
-
-### Package `runtime`
-
-- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214).
-
-### Metrics, Monitoring
-
-- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
-- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13397).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-
-
-
-
-## [v3.4.10](https://github.com/etcd-io/etcd/releases/tag/v3.4.10) (2020-07-16)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.9...v3.4.10) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### etcd server
-
-- Add [`--unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/11946) flag.
- - Setting the flag disables all uses of fsync, which is unsafe and will cause data loss. This flag makes it possible to run an etcd node for testing and development without placing lots of load on the file system.
-- Add [`etcd --auth-token-ttl`](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings.
-- Improve [`runtime.FDUsage` object allocations, memory usage, and CPU usage](https://github.com/etcd-io/etcd/pull/11986).
-- Improve [`mvcc.watchResponse` channel memory usage](https://github.com/etcd-io/etcd/pull/11987).
-- Fix [`int64` convert panic in raft logger](https://github.com/etcd-io/etcd/pull/12106).
- - Fix [kubernetes/kubernetes#91937](https://github.com/kubernetes/kubernetes/issues/91937).
-
-### Breaking Changes
-
-- Changed behavior on [existing dir permission](https://github.com/etcd-io/etcd/pull/11798).
- - Previously, permissions were not checked on an existing data directory or on the directory used for automatically generating self-signed certificates for TLS connections with clients. Now a check is added to make sure those directories, if they already exist, have the desired permission of 700 on Linux and 777 on Windows.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.9](https://github.com/etcd-io/etcd/releases/tag/v3.4.9) (2020-05-20)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.8...v3.4.9) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### Package `wal`
-
-- Add [missing CRC checksum check in WAL validate method that otherwise causes a panic](https://github.com/etcd-io/etcd/pull/11924).
- - See https://github.com/etcd-io/etcd/issues/11918.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.8](https://github.com/etcd-io/etcd/releases/tag/v3.4.8) (2020-05-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.7...v3.4.8) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### `etcdctl`
-
-- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
-
-### Package `clientv3`
-
-- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
-
-### etcd server
-
-- Improve logging around snapshot send and receive.
-- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670).
-- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817).
-- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888).
- - Previously, server restore failed if the server had crashed after persisting the raft hard state but before saving the snapshot.
- - See https://github.com/etcd-io/etcd/issues/10219 for more.
-
-### Package Auth
-
-- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652).
-
-### Metrics, Monitoring
-
-- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.7](https://github.com/etcd-io/etcd/releases/tag/v3.4.7) (2020-04-01)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.6...v3.4.7) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### etcd server
-
-- Improve [compaction performance when latest index is greater than 1-million](https://github.com/etcd-io/etcd/pull/11734).
-
-### Package `wal`
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-
-### Metrics, Monitoring
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.6](https://github.com/etcd-io/etcd/releases/tag/v3.4.6) (2020-03-29)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.5...v3.4.6) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-### Package `lease`
-
-- Fix [memory leak in follower nodes](https://github.com/etcd-io/etcd/pull/11731).
- - https://github.com/etcd-io/etcd/issues/11495
- - https://github.com/etcd-io/etcd/issues/11730
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.5](https://github.com/etcd-io/etcd/releases/tag/v3.4.5) (2020-03-18)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.4...v3.4.5) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).**
-
-### etcd server
-
-- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704).
-
-### client v3
-
-- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687).
- - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys (a usage sketch follows this list).
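-
-A rough usage sketch of `clientv3.WithRequireLeader` on a watch; the endpoint and watched prefix are illustrative assumptions:
-
-```go
-// require_leader_watch_sketch.go: a watch that fails fast when the connected member has no leader.
-// Endpoint and watched prefix are assumptions for illustration.
-package main
-
-import (
-    "context"
-    "log"
-    "time"
-
-    "go.etcd.io/etcd/clientv3"
-)
-
-func main() {
-    cli, err := clientv3.New(clientv3.Config{
-        Endpoints:   []string{"127.0.0.1:2379"},
-        DialTimeout: 5 * time.Second,
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer cli.Close()
-
-    // WithRequireLeader adds the "hasleader" metadata without clobbering
-    // values already stored in the parent context.
-    ctx := clientv3.WithRequireLeader(context.Background())
-
-    for wresp := range cli.Watch(ctx, "config/", clientv3.WithPrefix()) {
-        if err := wresp.Err(); err != nil {
-            log.Printf("watch error (e.g. no leader): %v", err)
-            return
-        }
-        for _, ev := range wresp.Events {
-            log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
-        }
-    }
-}
-```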
-
-### etcdctl v3
-
-- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11638) command to prevent potential timeout.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687).
-
-### gRPC Proxy
-
-- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler.
-
-### Go
-
-- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.4](https://github.com/etcd-io/etcd/releases/tag/v3.4.4) (2020-02-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.3...v3.4.4) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).**
-
-### etcd server
-
-- Fix [`wait purge file loop during shutdown`](https://github.com/etcd-io/etcd/pull/11308).
- - Previously, during shutdown etcd could accidentally remove needed wal files, resulting in catastrophic error `etcdserver: open wal error: wal: file not found.` during startup.
- - Now, etcd makes sure the purge file loop exits before server signals stop of the raft node.
-- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613).
-- Fix [quorum protection logic when promoting a learner](https://github.com/etcd-io/etcd/pull/11640).
-- Improve [peer corruption checker](https://github.com/etcd-io/etcd/pull/11621) to work when peer mTLS is enabled.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric.
-- Fix bug where [etcd_debugging_mvcc_db_compaction_keys_total is always 0](https://github.com/etcd-io/etcd/pull/11400).
-
-### Auth
-
-- Fix [NoPassword check when adding user through GRPC gateway](https://github.com/etcd-io/etcd/pull/11418) ([issue#11414](https://github.com/etcd-io/etcd/issues/11414))
-- Fix bug where [some auth related messages are logged at wrong level](https://github.com/etcd-io/etcd/pull/11586)
-
-
-
-
-
-## [v3.4.3](https://github.com/etcd-io/etcd/releases/tag/v3.4.3) (2019-10-24)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.2...v3.4.3) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).**
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Change [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11254) Prometheus metrics to include only major and minor version.
-
-### Go
-
-- Compile with [*Go 1.12.12*](https://golang.org/doc/devel/release.html#go1.12).
-
-
-
-
-
-## [v3.4.2](https://github.com/etcd-io/etcd/releases/tag/v3.4.2) (2019-10-11)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.1...v3.4.2) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).**
-
-### etcdctl v3
-
-- Fix [`etcdctl member add`](https://github.com/etcd-io/etcd/pull/11194) command to prevent potential timeout.
-
-### etcdserver
-
-- Add [`tracing`](https://github.com/etcd-io/etcd/pull/11179) to range, put and compact requests in etcdserver.
-
-### Go
-
-- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-
-### client v3
-
-- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184).
- - Fix ["kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch" (kubernetes#83028)](https://github.com/kubernetes/kubernetes/issues/83028).
-- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211).
- - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550).
-
-
-
-
-
-## [v3.4.1](https://github.com/etcd-io/etcd/releases/tag/v3.4.1) (2019-09-17)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.4.1) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).**
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-
-### etcd server
-
-- Fix [secure server logging message](https://github.com/etcd-io/etcd/commit/8b053b0f44c14ac0d9f39b9b78c17c57d47966eb).
-- Remove [redundant `%` characters in file descriptor warning message](https://github.com/etcd-io/etcd/commit/d5f79adc9cea9ec8c93669526464b0aa19ed417b).
-
-### Package `embed`
-
-- Add [`embed.Config.ZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/11148) to allow creating a custom zap logger.
-
-### Dependency
-
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0) to [**`v1.23.1`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.1).
-
-### Go
-
-- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-
-
-
-
-
-## v3.4.0 (2019-08-30)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0...v3.4.0) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
-
-- [v3.4.0](https://github.com/etcd-io/etcd/releases/tag/v3.4.0) (2019-08-30), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.4...v3.4.0).
-- [v3.4.0-rc.4](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.4) (2019-08-29), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.3...v3.4.0-rc.4).
-- [v3.4.0-rc.3](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.3) (2019-08-27), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.2...v3.4.0-rc.3).
-- [v3.4.0-rc.2](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.2) (2019-08-23), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.1...v3.4.0-rc.2).
-- [v3.4.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.1) (2019-08-15), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0-rc.0...v3.4.0-rc.1).
-- [v3.4.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.4.0-rc.0) (2019-08-12), see [code changes](https://github.com/etcd-io/etcd/compare/v3.3.0...v3.4.0-rc.0).
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/).**
-
-### Documentation
-
-- etcd now has a new website! Please visit https://etcd.io.
-
-### Improved
-
-- Add Raft learner: [etcd#10725](https://github.com/etcd-io/etcd/pull/10725), [etcd#10727](https://github.com/etcd-io/etcd/pull/10727), [etcd#10730](https://github.com/etcd-io/etcd/pull/10730).
- - User guide: [runtime-configuration document](https://etcd.io/docs/latest/op-guide/runtime-configuration/#add-a-new-member-as-learner).
- - API change: [API reference document](https://etcd.io/docs/latest/dev-guide/api_reference_v3/).
- - More details on implementation: [learner design document](https://etcd.io/docs/latest/learning/design-learner/) and [implementation task list](https://github.com/etcd-io/etcd/issues/10537).
-- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106).
- - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911).
- - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911).
- - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102).
- - Fix [gRPC panic "send on closed channel](https://github.com/etcd-io/etcd/issues/9956).
- - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. To block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`.
-- Add [backoff on watch retries on transient errors](https://github.com/etcd-io/etcd/pull/9840).
-- Add [jitter to watch progress notify](https://github.com/etcd-io/etcd/pull/9278) to prevent [spikes in `etcd_network_client_grpc_sent_bytes_total`](https://github.com/etcd-io/etcd/issues/9246).
-- Improve [read index wait timeout warning log](https://github.com/etcd-io/etcd/pull/10026), which indicates that local node might have slow network.
-- Improve [slow request apply warning log](https://github.com/etcd-io/etcd/pull/9288).
- - e.g. `read-only range request "key:\"/a\" range_end:\"/b\" " with result "range_response_count:3 size:96" took too long (97.966µs) to execute`.
- - Redact [request value field](https://github.com/etcd-io/etcd/pull/9822).
- - Provide [response size](https://github.com/etcd-io/etcd/pull/9826).
-- Improve ["became inactive" warning log](https://github.com/etcd-io/etcd/pull/10024), which indicates message send to a peer failed.
-- Improve [TLS setup error logging](https://github.com/etcd-io/etcd/pull/9518) to help debug [TLS-enabled cluster configuring issues](https://github.com/etcd-io/etcd/issues/9400).
-- Improve [long-running concurrent read transactions under light write workloads](https://github.com/etcd-io/etcd/pull/9296).
- - Previously, periodic commit on pending writes blocks incoming read transactions, even if there is no pending write.
- - Now, periodic commit operation does not block concurrent read transactions, thus improves long-running read transaction performance.
-- Make [backend read transactions fully concurrent](https://github.com/etcd-io/etcd/pull/10523).
- - Previously, ongoing long-running read transactions block writes and future reads.
- - With this change, write throughput is increased by 70% and P99 write latency is reduced by 90% in the presence of long-running reads.
-- Improve [Raft Read Index timeout warning messages](https://github.com/etcd-io/etcd/pull/9897).
-- Adjust [election timeout on server restart](https://github.com/etcd-io/etcd/pull/9415) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333).
- - Previously, etcd fast-forwarded election ticks on server start, leaving only one tick before a leader election. This speeds up the start phase, without waiting for all election ticks to elapse, and is useful for cross-datacenter deployments with larger election timeouts. However, it hurt cluster availability if the last tick elapsed before the leader contacted the restarted node.
- - Now, when etcd restarts, it adjusts the election ticks so that more than one tick is left, giving the leader more time to contact the restarted node and prevent a disruptive restart.
-- Add [Raft Pre-Vote feature](https://github.com/etcd-io/etcd/pull/9352) to reduce [disruptive rejoining servers](https://github.com/etcd-io/etcd/issues/9333).
- - For instance, a flaky (or rejoining) member may drop in and out and start campaigning. Such a member ends up with a higher term and ignores all incoming messages with lower terms, so a new leader eventually has to be elected, which is disruptive to cluster availability. Raft implements a Pre-Vote phase to prevent this kind of disruption: if enabled, Raft runs an additional election phase to check whether the pre-candidate can get enough votes to win an election.
-- Adjust [periodic compaction retention window](https://github.com/etcd-io/etcd/pull/9485).
- - e.g. `etcd --auto-compaction-mode=revision --auto-compaction-retention=1000` automatically runs `Compact` on `"latest revision" - 1000` every 5 minutes (when the latest revision is 30000, compact on revision 29000).
- - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h` automatically ran `Compact` with a 24-hour retention window every 2.4 hours. Now, `Compact` happens every hour.
- - e.g. Previously, `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` automatically ran `Compact` with a 30-minute retention window every 3 minutes. Now, `Compact` happens every 30 minutes.
- - The periodic compactor keeps recording the latest revisions every compaction period when the given period is less than 1 hour, or every 1 hour when the given period is greater than 1 hour (e.g. every 1 hour when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`).
- - Every compaction period (or every hour), the compactor discards historical data using the last revision fetched one compaction period earlier.
- - The retention window thus moves forward by one compaction period (or one hour) at a time.
- - For instance, when hourly writes are 100 and `etcd --auto-compaction-mode=periodic --auto-compaction-retention=24h`, `v3.2.x`, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revisions 2400, 2640, and 2880 every 2.4 hours, while `v3.3.3` *or later* compacts revisions 2400, 2500, and 2600 every hour.
- - Furthermore, when `etcd --auto-compaction-mode=periodic --auto-compaction-retention=30m` and writes per minute are about 1000, `v3.3.0`, `v3.3.1`, and `v3.3.2` compact revisions 30000, 33000, and 36000 every 3 minutes, while `v3.3.3` *or later* compacts revisions 30000, 60000, and 90000 every 30 minutes.
-- Improve [lease expire/revoke operation performance](https://github.com/etcd-io/etcd/pull/9418), address [lease scalability issue](https://github.com/etcd-io/etcd/issues/9496).
-- Make [Lease `Lookup` non-blocking with concurrent `Grant`/`Revoke`](https://github.com/etcd-io/etcd/pull/9229).
-- Make etcd server return `raft.ErrProposalDropped` on internal Raft proposal drop in [v3 applier](https://github.com/etcd-io/etcd/pull/9549) and [v2 applier](https://github.com/etcd-io/etcd/pull/9558).
- - e.g. a node is removed from cluster, or [`raftpb.MsgProp` arrives at current leader while there is an ongoing leadership transfer](https://github.com/etcd-io/etcd/issues/8975).
-- Add [`snapshot`](https://github.com/etcd-io/etcd/pull/9118) package for easier snapshot workflow (see [`godoc.org/github.com/etcd-io/etcd/clientv3/snapshot`](https://godoc.org/github.com/etcd-io/etcd/clientv3/snapshot) for more).
-- Improve [functional tester](https://github.com/etcd-io/etcd/tree/main/functional) coverage: [proxy layer to run network fault tests in CI](https://github.com/etcd-io/etcd/pull/9081), [TLS is enabled both for server and client](https://github.com/etcd-io/etcd/pull/9534), [liveness mode](https://github.com/etcd-io/etcd/issues/9230), [shuffle test sequence](https://github.com/etcd-io/etcd/issues/9381), [membership reconfiguration failure cases](https://github.com/etcd-io/etcd/pull/9564), [disastrous quorum loss and snapshot recover from a seed member](https://github.com/etcd-io/etcd/pull/9565), [embedded etcd](https://github.com/etcd-io/etcd/pull/9572).
-- Improve [index compaction blocking](https://github.com/etcd-io/etcd/pull/9511) by using a copy on write clone to avoid holding the lock for the traversal of the entire index.
-- Update [JWT methods](https://github.com/etcd-io/etcd/pull/9883) to allow for use of any supported signature method/algorithm.
-- Add [Lease checkpointing](https://github.com/etcd-io/etcd/pull/9924) to persist remaining TTLs to the consensus log periodically so that long lived leases progress toward expiry in the presence of leader elections and server restarts.
- - Enabled by experimental flag "--experimental-enable-lease-checkpoint".
-- Add [gRPC interceptor for debugging logs](https://github.com/etcd-io/etcd/pull/9990); enable `etcd --debug` flag to see per-request debug information.
-- Add [consistency check in snapshot status](https://github.com/etcd-io/etcd/pull/10109). If consistency check on snapshot file fails, `snapshot status` returns `"snapshot file integrity check failed..."` error.
-- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603).
-- Improve [heartbeat send failure logging](https://github.com/etcd-io/etcd/pull/10663).
-- Support [users with no password](https://github.com/etcd-io/etcd/pull/9817) to reduce the security risk of leaked passwords. Such users can only be authenticated with `CommonName`-based auth.
-- Add `etcd --experimental-peer-skip-client-san-verification` to [skip verification of peer client address](https://github.com/etcd-io/etcd/pull/10524).
-- Add `etcd --experimental-compaction-batch-limit` to [set the maximum number of revisions deleted in each compaction batch](https://github.com/etcd-io/etcd/pull/11034).
-- Reduce the default compaction batch size from 10k revisions to 1k revisions to improve p99 latency during compactions, and reduce the wait between compactions from 100ms to 10ms.
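-
-A rough sketch of blocking the dial with the new balancer, as noted above; the endpoint is an illustrative assumption:
-
-```go
-// dial_block_sketch.go: block until the first connection is up with the new balancer.
-// Endpoint is an assumption for illustration.
-package main
-
-import (
-    "log"
-    "time"
-
-    "go.etcd.io/etcd/clientv3"
-    "google.golang.org/grpc"
-)
-
-func main() {
-    // With the new balancer the dial is asynchronous; grpc.WithBlock() makes New()
-    // wait (bounded by DialTimeout) until the underlying connection is up.
-    cli, err := clientv3.New(clientv3.Config{
-        Endpoints:   []string{"127.0.0.1:2379"},
-        DialTimeout: 5 * time.Second,
-        DialOptions: []grpc.DialOption{grpc.WithBlock()},
-    })
-    if err != nil {
-        log.Fatal(err) // e.g. context deadline exceeded when no member is reachable
-    }
-    defer cli.Close()
-    log.Println("connected")
-}
-```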
-
-### Breaking Changes
-
-- Rewrite [client balancer](https://github.com/etcd-io/etcd/pull/9860) with [new gRPC balancer interface](https://github.com/etcd-io/etcd/issues/9106).
- - Upgrade [gRPC to v1.23.0](https://github.com/etcd-io/etcd/pull/10911).
- - Improve [client balancer failover against secure endpoints](https://github.com/etcd-io/etcd/pull/10911).
- - Fix ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available" (kubernetes#72102)](https://github.com/kubernetes/kubernetes/issues/72102).
- - Fix [gRPC panic "send on closed channel](https://github.com/etcd-io/etcd/issues/9956).
- - [The new client balancer](https://etcd.io/docs/latest/learning/design-client/) uses an asynchronous resolver to pass endpoints to the gRPC dial function. To block until the underlying connection is up, pass `grpc.WithBlock()` to `clientv3.Config.DialOptions`.
-- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045).
- - Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-- Migrate dependency management tool from `glide` to [Go module](https://github.com/etcd-io/etcd/pull/10063).
- - <= 3.3 puts `vendor` directory under `cmd/vendor` directory to [prevent conflicting transitive dependencies](https://github.com/etcd-io/etcd/issues/4913).
- - 3.4 moves `cmd/vendor` directory to `vendor` at repository root.
- - Remove recursive symlinks in `cmd` directory.
- - Now `go get/install/build` on `etcd` packages (e.g. `clientv3`, `tools/benchmark`) enforce builds with etcd `vendor` directory.
-- Deprecated `latest` [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tag.
- - **`docker pull gcr.io/etcd-development/etcd:latest` would not be up-to-date**.
-- Deprecated [minor](https://semver.org/) version [release container](https://console.cloud.google.com/gcr/images/etcd-development/GLOBAL/etcd) tags.
- - `docker pull gcr.io/etcd-development/etcd:v3.3` would still work.
- - **`docker pull gcr.io/etcd-development/etcd:v3.4` would not work**.
- - Use **`docker pull gcr.io/etcd-development/etcd:v3.4.x`** instead, with the exact patch version.
-- Deprecated [ACIs from official release](https://github.com/etcd-io/etcd/pull/9059).
- - [AppC was officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016.
- - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore.
- - `*.aci` files are not available from `v3.4` release.
-- Move [`"github.com/coreos/etcd"`](https://github.com/etcd-io/etcd/issues/9965) to [`"github.com/etcd-io/etcd"`](https://github.com/etcd-io/etcd/issues/9965).
- - Change import path to `"go.etcd.io/etcd"`.
- - e.g. `import "go.etcd.io/etcd/raft"`.
-- Make [`ETCDCTL_API=3 etcdctl` default](https://github.com/etcd-io/etcd/issues/9600).
- - Now, `etcdctl set foo bar` must be `ETCDCTL_API=2 etcdctl set foo bar`.
- - Now, `ETCDCTL_API=3 etcdctl put foo bar` could be just `etcdctl put foo bar`.
-- Make [`etcd --enable-v2=false` default](https://github.com/etcd-io/etcd/pull/10935).
-- Make [`embed.DefaultEnableV2` `false` default](https://github.com/etcd-io/etcd/pull/10935).
-- **Deprecated `etcd --ca-file` flag**. Use [`etcd --trusted-ca-file`](https://github.com/etcd-io/etcd/pull/9470) instead (`etcd --ca-file` flag has been marked deprecated since v2.1).
-- **Deprecated `etcd --peer-ca-file` flag**. Use [`etcd --peer-trusted-ca-file`](https://github.com/etcd-io/etcd/pull/9470) instead (`etcd --peer-ca-file` flag has been marked deprecated since v2.1).
-- **Deprecated `pkg/transport.TLSInfo.CAFile` field**. Use [`pkg/transport.TLSInfo.TrustedCAFile`](https://github.com/etcd-io/etcd/pull/9470) instead (`CAFile` field has been marked deprecated since v2.1).
-- Exit on [empty hosts in advertise URLs](https://github.com/etcd-io/etcd/pull/8786).
- - Address [advertise client URLs accepts empty hosts](https://github.com/etcd-io/etcd/issues/8379).
- - e.g. exit with error on `--advertise-client-urls=http://:2379`.
- - e.g. exit with error on `--initial-advertise-peer-urls=http://:2380`.
-- Exit on [shadowed environment variables](https://github.com/etcd-io/etcd/pull/9382).
- - Address [error on shadowed environment variables](https://github.com/etcd-io/etcd/issues/8380).
- - e.g. exit with error on `ETCD_NAME=abc etcd --name=def`.
- - e.g. exit with error on `ETCD_INITIAL_CLUSTER_TOKEN=abc etcd --initial-cluster-token=def`.
- - e.g. exit with error on `ETCDCTL_ENDPOINTS=abc.com ETCDCTL_API=3 etcdctl endpoint health --endpoints=def.com`.
-- Change [`etcdserverpb.AuthRoleRevokePermissionRequest/key,range_end` fields type from `string` to `bytes`](https://github.com/etcd-io/etcd/pull/9433).
-- Deprecating `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) instead.
-- Deprecating `etcd_debugging_mvcc_put_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_put_total`](https://github.com/etcd-io/etcd/pull/10962) instead.
-- Deprecating `etcd_debugging_mvcc_delete_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_delete_total`](https://github.com/etcd-io/etcd/pull/10962) instead.
-- Deprecating `etcd_debugging_mvcc_range_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_range_total`](https://github.com/etcd-io/etcd/pull/10968) instead.
-- Deprecating `etcd_debugging_mvcc_txn_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_txn_total`](https://github.com/etcd-io/etcd/pull/10968) instead.
-- Rename `etcdserver.ServerConfig.SnapCount` field to `etcdserver.ServerConfig.SnapshotCount`, to be consistent with the flag name `etcd --snapshot-count`.
-- Rename `embed.Config.SnapCount` field to [`embed.Config.SnapshotCount`](https://github.com/etcd-io/etcd/pull/9745), to be consistent with the flag name `etcd --snapshot-count`.
-- Change [`embed.Config.CorsInfo` in `*cors.CORSInfo` type to `embed.Config.CORS` in `map[string]struct{}` type](https://github.com/etcd-io/etcd/pull/9490).
-- Deprecated [`embed.Config.SetupLogging`](https://github.com/etcd-io/etcd/pull/9572).
- - Now logger is set up automatically based on [`embed.Config.Logger`, `embed.Config.LogOutputs`, `embed.Config.Debug` fields](https://github.com/etcd-io/etcd/pull/9572).
-- Rename [`etcd --log-output` to `etcd --log-outputs`](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs.
- - **`etcd --log-output`** will be deprecated in v3.5.
-- Rename [**`embed.Config.LogOutput`** to **`embed.Config.LogOutputs`**](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs.
-- Change [**`embed.Config.LogOutputs`** type from `string` to `[]string`](https://github.com/etcd-io/etcd/pull/9579) to support multiple log outputs.
- - Now that `etcd --log-outputs` accepts multiple writers, the etcd configuration YAML file's `log-outputs` field must be changed to `[]string` type.
- - Previously, `etcd --config-file etcd.config.yaml` could have a `log-outputs: default` field; now it must be `log-outputs: [default]`.
-- Deprecating [`etcd --debug`](https://github.com/etcd-io/etcd/pull/10947) flag. Use `etcd --log-level=debug` flag instead.
- - v3.5 will deprecate `etcd --debug` flag in favor of `etcd --log-level=debug`.
-- Change v3 `etcdctl snapshot` exit codes with [`snapshot` package](https://github.com/etcd-io/etcd/pull/9118/commits/df689f4280e1cce4b9d61300be13ca604d41670a).
- - Exit on error with exit code 1 (no more exit code 5 or 6 on `snapshot save/restore` commands).
-- Deprecated [`grpc.ErrClientConnClosing`](https://github.com/etcd-io/etcd/pull/10981).
- - `clientv3` and `proxy/grpcproxy` no longer return `grpc.ErrClientConnClosing`.
- - `grpc.ErrClientConnClosing` has been [deprecated in gRPC >= 1.10](https://github.com/grpc/grpc-go/pull/1854).
- - Use `clientv3.IsConnCanceled(error)` or `google.golang.org/grpc/status.FromError(error)` instead.
-- Deprecated [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3beta` with [`/v3`](https://github.com/etcd-io/etcd/pull/9298).
- - Deprecated [`/v3alpha`](https://github.com/etcd-io/etcd/pull/9298).
- - To deprecate [`/v3beta`](https://github.com/etcd-io/etcd/issues/9189) in v3.5.
- - In v3.4, `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback, but it won't work in v3.5; use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-- Change [`wal` package function signatures](https://github.com/etcd-io/etcd/pull/9572) to support [structured logger and logging to file](https://github.com/etcd-io/etcd/issues/9438) on the server side (a usage sketch follows this list).
- - Previously, `Open(dirpath string, snap walpb.Snapshot) (*WAL, error)`, now `Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error)`.
- - Previously, `OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error)`, now `OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error)`.
- - Previously, `Repair(dirpath string) bool`, now `Repair(lg *zap.Logger, dirpath string) bool`.
- - Previously, `Create(dirpath string, metadata []byte) (*WAL, error)`, now `Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error)`.
-- Remove [`pkg/cors` package](https://github.com/etcd-io/etcd/pull/9490).
-- Move internal packages to `etcdserver`.
- - `"github.com/coreos/etcd/alarm"` to `"go.etcd.io/etcd/etcdserver/api/v3alarm"`.
- - `"github.com/coreos/etcd/compactor"` to `"go.etcd.io/etcd/etcdserver/api/v3compactor"`.
- - `"github.com/coreos/etcd/discovery"` to `"go.etcd.io/etcd/etcdserver/api/v2discovery"`.
- - `"github.com/coreos/etcd/etcdserver/auth"` to `"go.etcd.io/etcd/etcdserver/api/v2auth"`.
- - `"github.com/coreos/etcd/etcdserver/membership"` to `"go.etcd.io/etcd/etcdserver/api/membership"`.
- - `"github.com/coreos/etcd/etcdserver/stats"` to `"go.etcd.io/etcd/etcdserver/api/v2stats"`.
- - `"github.com/coreos/etcd/error"` to `"go.etcd.io/etcd/etcdserver/api/v2error"`.
- - `"github.com/coreos/etcd/rafthttp"` to `"go.etcd.io/etcd/etcdserver/api/rafthttp"`.
- - `"github.com/coreos/etcd/snap"` to `"go.etcd.io/etcd/etcdserver/api/snap"`.
- - `"github.com/coreos/etcd/store"` to `"go.etcd.io/etcd/etcdserver/api/v2store"`.
-- Change [snapshot file permissions](https://github.com/etcd-io/etcd/pull/9977): On Linux, the snapshot file changes from readable by all (mode 0644) to readable by the user only (mode 0600).
-- Change [`pkg/adt.IntervalTree` from `struct` to `interface`](https://github.com/etcd-io/etcd/pull/10959).
- - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt).
-- Release branch `/version` defines version `3.4.x-pre`, instead of `3.4.y+git`.
- - Use `3.4.5-pre`, instead of `3.4.4+git`.
-
-### Dependency
-
-- Upgrade [`github.com/coreos/bbolt`](https://github.com/etcd-io/bbolt/releases) from [**`v1.3.1-coreos.6`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.1-coreos.6) to [`go.etcd.io/bbolt`](https://github.com/etcd-io/bbolt/releases) [**`v1.3.3`**](https://github.com/etcd-io/bbolt/releases/tag/v1.3.3).
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5) to [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0).
-- Migrate [`github.com/ugorji/go/codec`](https://github.com/ugorji/go/releases) to [**`github.com/json-iterator/go`**](https://github.com/json-iterator/go), to [regenerate v2 `client`](https://github.com/etcd-io/etcd/pull/9494) (See [#10667](https://github.com/etcd-io/etcd/pull/10667) for more).
-- Migrate [`github.com/ghodss/yaml`](https://github.com/ghodss/yaml/releases) to [**`sigs.k8s.io/yaml`**](https://github.com/kubernetes-sigs/yaml) (See [#10687](https://github.com/etcd-io/etcd/pull/10687) for more).
-- Upgrade [`golang.org/x/crypto`](https://github.com/golang/crypto) from [**`crypto@9419663f5`**](https://github.com/golang/crypto/commit/9419663f5a44be8b34ca85f08abc5fe1be11f8a3) to [**`crypto@0709b304e793`**](https://github.com/golang/crypto/commit/0709b304e793a5edb4a2c0145f281ecdc20838a4).
-- Upgrade [`golang.org/x/net`](https://github.com/golang/net) from [**`net@66aacef3d`**](https://github.com/golang/net/commit/66aacef3dd8a676686c7ae3716979581e8b03c47) to [**`net@adae6a3d119a`**](https://github.com/golang/net/commit/adae6a3d119ae4890b46832a2e88a95adc62b8e7).
-- Upgrade [`golang.org/x/sys`](https://github.com/golang/sys) from [**`sys@ebfc5b463`**](https://github.com/golang/sys/commit/ebfc5b4631820b793c9010c87fd8fef0f39eb082) to [**`sys@c7b8b68b1456`**](https://github.com/golang/sys/commit/c7b8b68b14567162c6602a7c5659ee0f26417c18).
-- Upgrade [`golang.org/x/text`](https://github.com/golang/text) from [**`text@b19bf474d`**](https://github.com/golang/text/commit/b19bf474d317b857955b12035d2c5acb57ce8b01) to [**`v0.3.0`**](https://github.com/golang/text/releases/tag/v0.3.0).
-- Upgrade [`golang.org/x/time`](https://github.com/golang/time) from [**`time@c06e80d93`**](https://github.com/golang/time/commit/c06e80d9300e4443158a03817b8a8cb37d230320) to [**`time@fbb02b229`**](https://github.com/golang/time/commit/fbb02b2291d28baffd63558aa44b4b56f178d650).
-- Upgrade [`github.com/golang/protobuf`](https://github.com/golang/protobuf/releases) from [**`golang/protobuf@1e59b77b5`**](https://github.com/golang/protobuf/commit/1e59b77b52bf8e4b449a57e6f79f21226d571845) to [**`v1.3.2`**](https://github.com/golang/protobuf/releases/tag/v1.3.2).
-- Upgrade [`gopkg.in/yaml.v2`](https://github.com/go-yaml/yaml/releases) from [**`yaml@cd8b52f82`**](https://github.com/go-yaml/yaml/commit/cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b) to [**`yaml@5420a8b67`**](https://github.com/go-yaml/yaml/commit/5420a8b6744d3b0345ab293f6fcba19c978f1183).
-- Upgrade [`github.com/dgrijalva/jwt-go`](https://github.com/dgrijalva/jwt-go/releases) from [**`v3.0.0`**](https://github.com/dgrijalva/jwt-go/releases/tag/v3.0.0) to [**`v3.2.0`**](https://github.com/dgrijalva/jwt-go/releases/tag/v3.2.0).
-- Upgrade [`github.com/soheilhy/cmux`](https://github.com/soheilhy/cmux/releases) from [**`v0.1.3`**](https://github.com/soheilhy/cmux/releases/tag/v0.1.3) to [**`v0.1.4`**](https://github.com/soheilhy/cmux/releases/tag/v0.1.4).
-- Upgrade [`github.com/google/btree`](https://github.com/google/btree/releases) from [**`google/btree@925471ac9`**](https://github.com/google/btree/commit/925471ac9e2131377a91e1595defec898166fe49) to [**`v1.0.0`**](https://github.com/google/btree/releases/tag/v1.0.0).
-- Upgrade [`github.com/spf13/cobra`](https://github.com/spf13/cobra/releases) from [**`spf13/cobra@1c44ec8d3`**](https://github.com/spf13/cobra/commit/1c44ec8d3f1552cac48999f9306da23c4d8a288b) to [**`v0.0.3`**](https://github.com/spf13/cobra/releases/tag/v0.0.3).
-- Upgrade [`github.com/spf13/pflag`](https://github.com/spf13/pflag/releases) from [**`v1.0.0`**](https://github.com/spf13/pflag/releases/tag/v1.0.0) to [**`spf13/pflag@1ce0cc6db`**](https://github.com/spf13/pflag/commit/1ce0cc6db4029d97571db82f85092fccedb572ce).
-- Upgrade [`github.com/coreos/go-systemd`](https://github.com/coreos/go-systemd/releases) from [**`v15`**](https://github.com/coreos/go-systemd/releases/tag/v15) to [**`v17`**](https://github.com/coreos/go-systemd/releases/tag/v17).
-- Upgrade [`github.com/prometheus/client_golang`](https://github.com/prometheus/client_golang/releases) from [**``prometheus/client_golang@5cec1d042``**](https://github.com/prometheus/client_golang/commit/5cec1d0429b02e4323e042eb04dafdb079ddf568) to [**`v1.0.0`**](https://github.com/prometheus/client_golang/releases/tag/v1.0.0).
-- Upgrade [`github.com/grpc-ecosystem/go-grpc-prometheus`](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases) from [**``grpc-ecosystem/go-grpc-prometheus@0dafe0d49``**](https://github.com/grpc-ecosystem/go-grpc-prometheus/commit/0dafe0d496ea71181bf2dd039e7e3f44b6bd11a7) to [**`v1.2.0`**](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0).
-- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) from [**`v1.3.1`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.1) to [**`v1.4.1`**](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.4.1).
-- Migrate [`github.com/kr/pty`](https://github.com/kr/pty/releases) to [**`github.com/creack/pty`**](https://github.com/creack/pty/releases/tag/v1.1.7), as the latter has replaced the original module.
-- Upgrade [`github.com/gogo/protobuf`](https://github.com/gogo/protobuf/releases) from [**`v1.0.0`**](https://github.com/gogo/protobuf/releases/tag/v1.0.0) to [**`v1.2.1`**](https://github.com/gogo/protobuf/releases/tag/v1.2.1).
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Add [`etcd_snap_db_fsync_duration_seconds_count`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_snap_db_save_total_duration_seconds_bucket`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_send_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_success`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_failures`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_total_duration_seconds`](https://github.com/etcd-io/etcd/pull/9997) Prometheus metric.
-- Add [`etcd_network_active_peers`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric.
- - Let's say `"7339c4e5e833c029"` server `/metrics` returns `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="729934363faa4a24"} 1` and `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="b548c2511513015"} 1`. This indicates that the local node `"7339c4e5e833c029"` currently has two active remote peers `"729934363faa4a24"` and `"b548c2511513015"` in a 3-node cluster. If the node `"b548c2511513015"` is down, the local node `"7339c4e5e833c029"` will show `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="729934363faa4a24"} 1` and `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="b548c2511513015"} 0`.
-- Add [`etcd_network_disconnected_peers_total`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric.
- - If a remote peer `"b548c2511513015"` is down, the local node `"7339c4e5e833c029"` server `/metrics` would return `etcd_network_disconnected_peers_total{Local="7339c4e5e833c029",Remote="b548c2511513015"} 1`, while active peer metrics will show `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="729934363faa4a24"} 1` and `etcd_network_active_peers{Local="7339c4e5e833c029",Remote="b548c2511513015"} 0`.
-- Add [`etcd_network_server_stream_failures_total`](https://github.com/etcd-io/etcd/pull/9760) Prometheus metric.
- - e.g. `etcd_network_server_stream_failures_total{API="lease-keepalive",Type="receive"} 1`
- - e.g. `etcd_network_server_stream_failures_total{API="watch",Type="receive"} 1`
-- Improve [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/10155) Prometheus metric to track leader heartbeats.
- - Previously, it only samples the TCP connection for snapshot messages.
-- Increase [`etcd_network_peer_round_trip_time_seconds`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric histogram upper-bound.
- - Previously, highest bucket only collects requests taking 0.8192 seconds or more.
- - Now, highest buckets collect 0.8192 seconds, 1.6384 seconds, and 3.2768 seconds or more.
-- Add [`etcd_server_is_leader`](https://github.com/etcd-io/etcd/pull/9587) Prometheus metric.
-- Add [`etcd_server_id`](https://github.com/etcd-io/etcd/pull/9998) Prometheus metric.
-- Add [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/10257) Prometheus metric.
-- Add [`etcd_server_version`](https://github.com/etcd-io/etcd/pull/8960) Prometheus metric.
- - To replace [Kubernetes `etcd-version-monitor`](https://github.com/etcd-io/etcd/issues/8948).
-- Add [`etcd_server_go_version`](https://github.com/etcd-io/etcd/pull/9957) Prometheus metric.
-- Add [`etcd_server_health_success`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_health_failures`](https://github.com/etcd-io/etcd/pull/10156) Prometheus metric.
-- Add [`etcd_server_read_indexes_failed_total`](https://github.com/etcd-io/etcd/pull/10094) Prometheus metric.
-- Add [`etcd_server_heartbeat_send_failures_total`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric.
-- Add [`etcd_server_slow_apply_total`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric.
-- Add [`etcd_server_slow_read_indexes_total`](https://github.com/etcd-io/etcd/pull/9897) Prometheus metric.
-- Add [`etcd_server_quota_backend_bytes`](https://github.com/etcd-io/etcd/pull/9820) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-- Add [`etcd_mvcc_db_total_size_in_use_in_bytes`](https://github.com/etcd-io/etcd/pull/9256) Prometheus metric.
- - Use it with `etcd_mvcc_db_total_size_in_bytes` and `etcd_mvcc_db_total_size_in_use_in_bytes`.
- - `etcd_server_quota_backend_bytes 2.147483648e+09` means current quota size is 2 GB.
- - `etcd_mvcc_db_total_size_in_bytes 20480` means current physically allocated DB size is 20 KB.
- - `etcd_mvcc_db_total_size_in_use_in_bytes 16384` means future DB size if defragment operation is complete.
- - `etcd_mvcc_db_total_size_in_bytes - etcd_mvcc_db_total_size_in_use_in_bytes` is the number of bytes that can be saved on disk with defragment operation.
-- Add [`etcd_mvcc_db_open_read_transactions`](https://github.com/etcd-io/etcd/pull/10523/commits/ad80752715aaed449629369687c5fd30eb1bda76) Prometheus metric.
-- Add [`etcd_snap_fsync_duration_seconds`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric.
-- Add [`etcd_disk_backend_defrag_duration_seconds`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric.
-- Add [`etcd_mvcc_hash_duration_seconds`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric.
-- Add [`etcd_mvcc_hash_rev_duration_seconds`](https://github.com/etcd-io/etcd/pull/9761) Prometheus metric.
-- Add [`etcd_debugging_disk_backend_commit_rebalance_duration_seconds`](https://github.com/etcd-io/etcd/pull/9834) Prometheus metric.
-- Add [`etcd_debugging_disk_backend_commit_spill_duration_seconds`](https://github.com/etcd-io/etcd/pull/9834) Prometheus metric.
-- Add [`etcd_debugging_disk_backend_commit_write_duration_seconds`](https://github.com/etcd-io/etcd/pull/9834) Prometheus metric.
-- Add [`etcd_debugging_lease_granted_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric.
-- Add [`etcd_debugging_lease_revoked_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric.
-- Add [`etcd_debugging_lease_renewed_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric.
-- Add [`etcd_debugging_lease_ttl_total`](https://github.com/etcd-io/etcd/pull/9778) Prometheus metric.
-- Add [`etcd_network_snapshot_send_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric.
-- Add [`etcd_network_snapshot_receive_inflights_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric.
-- Add [`etcd_server_snapshot_apply_in_progress_total`](https://github.com/etcd-io/etcd/pull/11009) Prometheus metric.
-- Add [`etcd_server_is_learner`](https://github.com/etcd-io/etcd/pull/10731) Prometheus metric.
-- Add [`etcd_server_learner_promote_failures`](https://github.com/etcd-io/etcd/pull/10731) Prometheus metric.
-- Add [`etcd_server_learner_promote_successes`](https://github.com/etcd-io/etcd/pull/10731) Prometheus metric.
-- Increase [`etcd_debugging_mvcc_index_compaction_pause_duration_milliseconds`](https://github.com/etcd-io/etcd/pull/9762) Prometheus metric histogram upper-bound.
- - Previously, highest bucket only collects requests taking 1.024 seconds or more.
- - Now, highest buckets collect 1.024 seconds, 2.048 seconds, and 4.096 seconds or more.
-- Fix missing [`etcd_network_peer_sent_failures_total`](https://github.com/etcd-io/etcd/pull/9437) Prometheus metric count.
-- Fix [`etcd_debugging_server_lease_expired_total`](https://github.com/etcd-io/etcd/pull/9557) Prometheus metric.
-- Fix [race conditions in v2 server stat collecting](https://github.com/etcd-io/etcd/pull/9562).
-- Change [gRPC proxy to expose etcd server endpoint /metrics](https://github.com/etcd-io/etcd/pull/10618).
- - Previously, the metrics exposed via the proxy were those of the proxy itself, not of the etcd server members.
-- Fix bug where [db_compaction_total_duration_milliseconds metric incorrectly measured duration as 0](https://github.com/etcd-io/etcd/pull/10646).
-- Deprecating `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_db_total_size_in_bytes`](https://github.com/etcd-io/etcd/pull/9819) instead.
-- Deprecating `etcd_debugging_mvcc_put_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_put_total`](https://github.com/etcd-io/etcd/pull/10962) instead.
-- Deprecating `etcd_debugging_mvcc_delete_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_delete_total`](https://github.com/etcd-io/etcd/pull/10962) instead.
-- Deprecating `etcd_debugging_mvcc_range_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_range_total`](https://github.com/etcd-io/etcd/pull/10968) instead.
-- Deprecating `etcd_debugging_mvcc_txn_total` Prometheus metric (to be removed in v3.5). Use [`etcd_mvcc_txn_total`](https://github.com/etcd-io/etcd/pull/10968) instead.
-
-### Security, Authentication
-
-See [security doc](https://etcd.io/docs/latest/op-guide/security/) for more details.
-
-- Support TLS cipher suite whitelisting.
- - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320).
- - TLS handshake fails when client hello is requested with invalid cipher suites.
- - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag.
- - If empty, Go auto-populates the list.
-- Add [`etcd --host-whitelist`](https://github.com/etcd-io/etcd/pull/9372) flag, [`etcdserver.Config.HostWhitelist`](https://github.com/etcd-io/etcd/pull/9372), and [`embed.Config.HostWhitelist`](https://github.com/etcd-io/etcd/pull/9372), to prevent ["DNS Rebinding"](https://en.wikipedia.org/wiki/DNS_rebinding) attack.
- - Any website can simply create an authorized DNS name, and direct DNS to `"localhost"` (or any other address). Then, all HTTP endpoints of an etcd server listening on `"localhost"` become accessible, and thus vulnerable to [DNS rebinding attacks (CVE-2018-5702)](https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2).
- - The client origin enforcement policy works as follows:
- - If the client connection is secure via HTTPS, allow any hostname.
- - If the client connection is not secure and `"HostWhitelist"` is not empty, only allow HTTP requests whose Host field is listed in the whitelist.
- - By default, `"HostWhitelist"` is `"*"`, which means insecure server allows all client HTTP requests.
- - Note that the client origin policy is enforced whether authentication is enabled or not, for tighter controls.
- - When specifying hostnames, loopback addresses are not added automatically. To allow loopback interfaces, add them to whitelist manually (e.g. `"localhost"`, `"127.0.0.1"`, etc.).
- - e.g. with `etcd --host-whitelist example.com`, the server will reject all HTTP requests whose Host field is not `example.com` (it also rejects requests to `"localhost"`).
-- Support [`etcd --cors`](https://github.com/etcd-io/etcd/pull/9490) in v3 HTTP requests (gRPC gateway).
-- Support [`ttl` field for `etcd` Authentication JWT token](https://github.com/etcd-io/etcd/pull/8302).
- - e.g. `etcd --auth-token jwt,pub-key=,priv-key=,sign-method=,ttl=5m`.
-- Allow empty token provider in [`etcdserver.ServerConfig.AuthToken`](https://github.com/etcd-io/etcd/pull/9369).
-- Fix [TLS reload](https://github.com/etcd-io/etcd/pull/9570) when [certificate SAN field only includes IP addresses but no domain names](https://github.com/etcd-io/etcd/issues/9541).
- - In Go, server calls `(*tls.Config).GetCertificate` for TLS reload if and only if server's `(*tls.Config).Certificates` field is not empty, or `(*tls.ClientHelloInfo).ServerName` is not empty with a valid SNI from the client. Previously, etcd always populates `(*tls.Config).Certificates` on the initial client TLS handshake, as non-empty. Thus, client was always expected to supply a matching SNI in order to pass the TLS verification and to trigger `(*tls.Config).GetCertificate` to reload TLS assets.
- - However, a certificate whose SAN field does [not include any domain names but only IP addresses](https://github.com/etcd-io/etcd/issues/9541) would request `*tls.ClientHelloInfo` with an empty `ServerName` field, thus failing to trigger the TLS reload on initial TLS handshake; this becomes a problem when expired certificates need to be replaced online.
- - Now, `(*tls.Config).Certificates` is created empty on the initial TLS client handshake, first to trigger `(*tls.Config).GetCertificate`, and then to populate the rest of the certificates on every new TLS connection, even when the client SNI is empty (e.g. cert only includes IPs).
-
-### etcd server
-
-- Add [`rpctypes.ErrLeaderChanged`](https://github.com/etcd-io/etcd/pull/10094).
- - Now, linearizable requests with read index fail fast when there is a leadership change, instead of waiting until the context times out.
-- Add [`etcd --initial-election-tick-advance`](https://github.com/etcd-io/etcd/pull/9591) flag to configure initial election tick fast-forward.
- - By default, `etcd --initial-election-tick-advance=true`, so the local member fast-forwards its election ticks to speed up the "initial" leader election trigger.
- - This benefits the case of larger election ticks. For instance, a cross-datacenter deployment may require a longer election timeout of 10 seconds. If true, the local node does not need to wait up to 10 seconds; instead, it fast-forwards its election ticks to 8 seconds, leaving only 2 seconds before leader election.
- - The major assumptions are that the cluster has no active leader, so advancing ticks enables faster leader election; or that the cluster already has an established leader, and a rejoining follower is likely to receive heartbeats from the leader after the tick advance and before the election timeout.
- - However, when the network from the leader to a rejoining follower is congested and the follower does not receive a leader heartbeat within the remaining election ticks, a disruptive election has to happen, affecting cluster availability.
- - Now, this can be disabled by setting `etcd --initial-election-tick-advance=false`.
- - Disabling this slows down the initial bootstrap process for cross-datacenter deployments; make this tradeoff by configuring `etcd --initial-election-tick-advance` only when the slower initial bootstrap is acceptable.
- - If single-node, it advances ticks regardless.
- - Address [disruptive rejoining follower node](https://github.com/etcd-io/etcd/issues/9333).
-- Add [`etcd --pre-vote`](https://github.com/etcd-io/etcd/pull/9352) flag to enable an additional Raft election phase.
- - For instance, a flaky (or rejoining) member may drop in and out, and start a campaign. This member will end up with a higher term, and ignore all incoming messages with a lower term. In this case, a new leader eventually needs to be elected, which is disruptive to cluster availability. Raft implements the Pre-Vote phase to prevent this kind of disruption. If enabled, Raft runs an additional phase of election to check whether a pre-candidate can get enough votes to win an election.
- - `etcd --pre-vote=false` by default.
- - v3.5 will enable `etcd --pre-vote=true` by default.
-- Add `etcd --experimental-compaction-batch-limit` to [set the maximum number of revisions deleted in each compaction batch](https://github.com/etcd-io/etcd/pull/11034).
-- Reduced the default compaction batch size from 10k revisions to 1k revisions to improve p99 latency during compactions, and reduced the wait between compactions from 100ms to 10ms.
-- Add [`etcd --discovery-srv-name`](https://github.com/etcd-io/etcd/pull/8690) flag to support custom DNS SRV name with discovery.
- - If not given, etcd queries `_etcd-server-ssl._tcp.[YOUR_HOST]` and `_etcd-server._tcp.[YOUR_HOST]`.
- - If `etcd --discovery-srv-name="foo"`, then query `_etcd-server-ssl-foo._tcp.[YOUR_HOST]` and `_etcd-server-foo._tcp.[YOUR_HOST]`.
- - Useful for operating multiple etcd clusters under the same domain.
-- Support TLS cipher suite whitelisting.
- - To block [weak cipher suites](https://github.com/etcd-io/etcd/issues/8320).
- - TLS handshake fails when client hello is requested with invalid cipher suites.
- - Add [`etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/9801) flag.
- - If empty, Go auto-populates the list.
-- Support [`etcd --cors`](https://github.com/etcd-io/etcd/pull/9490) in v3 HTTP requests (gRPC gateway).
-- Rename [`etcd --log-output` to `etcd --log-outputs`](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs.
- - **`etcd --log-output` will be deprecated in v3.5**.
-- Add [`etcd --logger`](https://github.com/etcd-io/etcd/pull/9572) flag to support [structured logger and multiple log outputs](https://github.com/etcd-io/etcd/issues/9438) in server-side.
- - **`etcd --logger=capnslog` will be deprecated in v3.5**.
- - The main motivation is to promote automated etcd monitoring, rather than combing through server logs after something breaks. Future development will make etcd log as little as possible, and make etcd easier to monitor with metrics and alerts.
- - `etcd --logger=capnslog --log-outputs=default` is the default setting and same as previous etcd server logging format.
- - The combination `etcd --logger=zap --log-outputs=default` is not supported.
- - Use `etcd --logger=zap --log-outputs=stderr` instead.
- - Or, use `etcd --logger=zap --log-outputs=systemd/journal` to send logs to the local systemd journal.
- - Previously, if the etcd parent process ID (PPID) was 1 (e.g. run with systemd), `etcd --logger=capnslog --log-outputs=default` redirected server logs to the local systemd journal; if the write to journald failed, it fell back to `os.Stderr`.
- - However, even with PPID 1, it can fail to dial the systemd journal (e.g. when running embedded etcd in a Docker container). Then, [every single log write will fail](https://github.com/etcd-io/etcd/pull/9729) and fall back to `os.Stderr`, which is inefficient.
- - To avoid this problem, systemd journal logging must be configured manually.
- - `etcd --logger=zap --log-outputs=stderr` will log server operations in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig) and writes logs to `os.Stderr`. Use this to override journald log redirects.
- - `etcd --logger=zap --log-outputs=stdout` will log server operations in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig) and write logs to `os.Stdout`. Use this to override journald log redirects.
- - `etcd --logger=zap --log-outputs=a.log` will log server operations in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig) and writes logs to the specified file `a.log`.
- - `etcd --logger=zap --log-outputs=a.log,b.log,c.log,stdout` [writes server logs to multiple files `a.log`, `b.log` and `c.log` at the same time](https://github.com/etcd-io/etcd/pull/9579) and outputs to `os.Stdout`, in [JSON-encoded format](https://godoc.org/go.uber.org/zap#NewProductionEncoderConfig).
- - `etcd --logger=zap --log-outputs=/dev/null` will discard all server logs.
-- Add [`etcd --log-level`](https://github.com/etcd-io/etcd/pull/10947) flag to support log level.
- - v3.5 will deprecate `etcd --debug` flag in favor of `etcd --log-level=debug`.
-- Add [`etcd --backend-batch-limit`](https://github.com/etcd-io/etcd/pull/10283) flag.
-- Add [`etcd --backend-batch-interval`](https://github.com/etcd-io/etcd/pull/10283) flag.
-- Fix [`mvcc` "unsynced" watcher restore operation](https://github.com/etcd-io/etcd/pull/9281).
- - "unsynced" watcher is watcher that needs to be in sync with events that have happened.
- - That is, "unsynced" watcher is the slow watcher that was requested on old revision.
- - "unsynced" watcher restore operation was not correctly populating its underlying watcher group.
- - Which possibly causes [missing events from "unsynced" watchers](https://github.com/etcd-io/etcd/issues/9086).
- - A node gets network partitioned with a watcher on a future revision, and falls behind receiving a leader snapshot after partition gets removed. When applying this snapshot, etcd watch storage moves current synced watchers to unsynced since sync watchers might have become stale during network partition. And reset synced watcher group to restart watcher routines. Previously, there was a bug when moving from synced watcher group to unsynced, thus client would miss events when the watcher was requested to the network-partitioned node.
-- Fix [`mvcc` server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775).
- - Let's assume that a watcher had been requested with a future revision X and sent to node A, which became network-partitioned thereafter. Meanwhile, the cluster makes progress. Then, when the partition heals, the leader sends a snapshot to node A. Previously, if the snapshot's latest revision was still lower than the watch revision X, **the etcd server panicked** during the snapshot restore operation.
- - Now, this server-side panic has been fixed.
-- Fix [server panic on invalid Election Proclaim/Resign HTTP(S) requests](https://github.com/etcd-io/etcd/pull/9379).
- - Previously, wrong-formatted HTTP requests to Election API could trigger panic in etcd server.
- - e.g. `curl -L http://localhost:2379/v3/election/proclaim -X POST -d '{"value":""}'`, `curl -L http://localhost:2379/v3/election/resign -X POST -d '{"value":""}'`.
-- Fix [revision-based compaction retention parsing](https://github.com/etcd-io/etcd/pull/9339).
- - Previously, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` was [translated to revision retention 3600000000000](https://github.com/etcd-io/etcd/issues/9337).
- - Now, `etcd --auto-compaction-mode revision --auto-compaction-retention 1` is correctly parsed as revision retention 1.
-- Prevent [overflow by large `TTL` values for `Lease` `Grant`](https://github.com/etcd-io/etcd/pull/9399).
- - `TTL` parameter to `Grant` request is unit of second.
- - Leases with too large `TTL` values exceeding `math.MaxInt64` [expire in unexpected ways](https://github.com/etcd-io/etcd/issues/9374).
- - Server now returns `rpctypes.ErrLeaseTTLTooLarge` to client, when the requested `TTL` is larger than *9,000,000,000 seconds* (which is >285 years).
- - Again, etcd `Lease` is meant for short, periodic keepalives or sessions, in the range of seconds or minutes, not hours or days!
-- Fix [expired lease revoke](https://github.com/etcd-io/etcd/pull/10693).
- - Fix ["the key is not deleted when the bound lease expires"](https://github.com/etcd-io/etcd/issues/10686).
-- Enable etcd server [`raft.Config.CheckQuorum` when starting with `ForceNewCluster`](https://github.com/etcd-io/etcd/pull/9347).
-- Allow [non-WAL files in `etcd --wal-dir` directory](https://github.com/etcd-io/etcd/pull/9743).
- - Previously, existing files such as [`lost+found`](https://github.com/etcd-io/etcd/issues/7287) in WAL directory prevent etcd server boot.
- - Now, a WAL directory that contains only `lost+found` or files not suffixed with `.wal` is considered uninitialized.
-- Fix [`ETCD_CONFIG_FILE` env variable parsing in `etcd`](https://github.com/etcd-io/etcd/pull/10762).
-- Fix [race condition in `rafthttp` transport pause/resume](https://github.com/etcd-io/etcd/pull/10826).
-- Fix [server crash from creating an empty role](https://github.com/etcd-io/etcd/pull/10907).
- - Previously, creating a role with an empty name crashed the etcd server with error code `Unavailable`.
- - Now, creating a role with an empty name is rejected with error code `InvalidArgument`.
-
-### API
-
-- Add `isLearner` field to `etcdserverpb.Member`, `etcdserverpb.MemberAddRequest` and `etcdserverpb.StatusResponse` as part of [raft learner implementation](https://github.com/etcd-io/etcd/pull/10725).
-- Add `MemberPromote` rpc to `etcdserverpb.Cluster` interface and the corresponding `MemberPromoteRequest` and `MemberPromoteResponse` as part of [raft learner implementation](https://github.com/etcd-io/etcd/pull/10725).
-- Add [`snapshot`](https://github.com/etcd-io/etcd/pull/9118) package for snapshot restore/save operations (see [`godoc.org/github.com/coreos/etcd/clientv3/snapshot`](https://godoc.org/github.com/coreos/etcd/clientv3/snapshot) for more).
-- Add [`watch_id` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9065) to allow user-provided watch ID to `mvcc`.
- - Corresponding `watch_id` is returned via `etcdserverpb.WatchResponse`, if any.
-- Add [`fragment` field to `etcdserverpb.WatchCreateRequest`](https://github.com/etcd-io/etcd/pull/9291) to request etcd server to [split watch events](https://github.com/etcd-io/etcd/issues/9294) when the total size of events exceeds `etcd --max-request-bytes` flag value plus gRPC-overhead 512 bytes.
- - The default server-side request bytes limit is `embed.DefaultMaxRequestBytes` which is 1.5 MiB plus gRPC-overhead 512 bytes.
- - If watch response events exceed this server-side request limit and watch request is created with `fragment` field `true`, the server will split watch events into a set of chunks, each of which is a subset of watch events below server-side request limit.
- - Useful when client-side has limited bandwidths.
- - For example, watch response contains 10 events, where each event is 1 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB. Then, server will send 10 separate fragmented events to the client.
- - For example, watch response contains 5 events, where each event is 2 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB. Then, server will try to send 5 separate fragmented events to the client, and the client will error with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`.
- - Client must implement fragmented watch event merge (which `clientv3` does in etcd v3.4).
-- Add [`raftAppliedIndex` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9176) for current Raft applied index.
-- Add [`errors` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9206) for server-side error.
- - e.g. `"etcdserver: no leader", "NOSPACE", "CORRUPT"`
-- Add [`dbSizeInUse` field to `etcdserverpb.StatusResponse`](https://github.com/etcd-io/etcd/pull/9256) for actual DB size after compaction.
-- Add [`WatchRequest.WatchProgressRequest`](https://github.com/etcd-io/etcd/pull/9869).
- - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams.
- - Think of it as `WithProgressNotify` that can be triggered manually.
-
-Note: **v3.5 will deprecate `etcd --log-package-levels` flag for `capnslog`**; `etcd --logger=zap --log-outputs=stderr` will be the default. **v3.5 will deprecate `[CLIENT-URL]/config/local/log` endpoint.**
-
-### Package `embed`
-
-- Add [`embed.Config.CipherSuites`](https://github.com/etcd-io/etcd/pull/9801) to specify a list of supported cipher suites for TLS handshake between client/server and peers.
- - If empty, Go auto-populates the list.
- - `embed.Config.ClientTLSInfo.CipherSuites` and `embed.Config.CipherSuites` cannot both be non-empty at the same time.
- - If not empty, specify either `embed.Config.ClientTLSInfo.CipherSuites` or `embed.Config.CipherSuites` (see the sketch after this list).
-- Add [`embed.Config.InitialElectionTickAdvance`](https://github.com/etcd-io/etcd/pull/9591) to enable/disable initial election tick fast-forward.
- - `embed.NewConfig()` would return `*embed.Config` with `InitialElectionTickAdvance` as true by default.
-- Define [`embed.CompactorModePeriodic`](https://godoc.org/github.com/etcd-io/etcd/embed#pkg-variables) for `compactor.ModePeriodic`.
-- Define [`embed.CompactorModeRevision`](https://godoc.org/github.com/etcd-io/etcd/embed#pkg-variables) for `compactor.ModeRevision`.
-- Change [`embed.Config.CorsInfo` in `*cors.CORSInfo` type to `embed.Config.CORS` in `map[string]struct{}` type](https://github.com/etcd-io/etcd/pull/9490).
-- Remove [`embed.Config.SetupLogging`](https://github.com/etcd-io/etcd/pull/9572).
- - Now logger is set up automatically based on [`embed.Config.Logger`, `embed.Config.LogOutputs`, `embed.Config.Debug` fields](https://github.com/etcd-io/etcd/pull/9572).
-- Add [`embed.Config.Logger`](https://github.com/etcd-io/etcd/pull/9518) to support [structured logger `zap`](https://github.com/uber-go/zap) in server-side.
-- Add [`embed.Config.LogLevel`](https://github.com/etcd-io/etcd/pull/10947).
-- Rename `embed.Config.SnapCount` field to [`embed.Config.SnapshotCount`](https://github.com/etcd-io/etcd/pull/9745), to be consistent with the flag name `etcd --snapshot-count`.
-- Rename [**`embed.Config.LogOutput`** to **`embed.Config.LogOutputs`**](https://github.com/etcd-io/etcd/pull/9624) to support multiple log outputs.
-- Change [**`embed.Config.LogOutputs`** type from `string` to `[]string`](https://github.com/etcd-io/etcd/pull/9579) to support multiple log outputs.
-- Add [`embed.Config.BackendBatchLimit`](https://github.com/etcd-io/etcd/pull/10283) field.
-- Add [`embed.Config.BackendBatchInterval`](https://github.com/etcd-io/etcd/pull/10283) field.
-- Make [`embed.DefaultEnableV2` `false` default](https://github.com/etcd-io/etcd/pull/10935).
-
-### Package `pkg/adt`
-
-- Change [`pkg/adt.IntervalTree` from `struct` to `interface`](https://github.com/etcd-io/etcd/pull/10959).
- - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt).
-- Improve [`pkg/adt.IntervalTree` test coverage](https://github.com/etcd-io/etcd/pull/10959).
- - See [`pkg/adt` README](https://github.com/etcd-io/etcd/tree/main/pkg/adt) and [`pkg/adt` godoc](https://godoc.org/go.etcd.io/etcd/pkg/adt).
-- Fix [Red-Black tree to maintain black-height property](https://github.com/etcd-io/etcd/pull/10978).
- - Previously, delete operation violates [black-height property](https://github.com/etcd-io/etcd/issues/10965).
-
-### Package `integration`
-
-- Add [`CLUSTER_DEBUG` to enable test cluster logging](https://github.com/etcd-io/etcd/pull/9678).
- - Deprecated `capnslog` in integration tests.
-
-### client v3
-
-- Add [`MemberAddAsLearner`](https://github.com/etcd-io/etcd/pull/10725) to the `clientv3.Cluster` interface. This API is used to add a learner member to an etcd cluster.
-- Add [`MemberPromote`](https://github.com/etcd-io/etcd/pull/10727) to the `clientv3.Cluster` interface. This API is used to promote a learner member in an etcd cluster.
-- Client may receive [`rpctypes.ErrLeaderChanged`](https://github.com/etcd-io/etcd/pull/10094) from server.
- - Now, linearizable requests with read index fail fast when there is a leadership change, instead of waiting until the context times out.
-- Add [`WithFragment` `OpOption`](https://github.com/etcd-io/etcd/pull/9291) to support [watch events fragmentation](https://github.com/etcd-io/etcd/issues/9294) when the total size of events exceeds `etcd --max-request-bytes` flag value plus gRPC-overhead 512 bytes.
- - Watch fragmentation is disabled by default (a combined client usage sketch follows this list).
- - The default server-side request bytes limit is `embed.DefaultMaxRequestBytes` which is 1.5 MiB plus gRPC-overhead 512 bytes.
- - If watch response events exceed this server-side request limit and watch request is created with `fragment` field `true`, the server will split watch events into a set of chunks, each of which is a subset of watch events below server-side request limit.
- - Useful when client-side has limited bandwidths.
- - For example, watch response contains 10 events, where each event is 1 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB. Then, server will send 10 separate fragmented events to the client.
- - For example, watch response contains 5 events, where each event is 2 MiB. And server `etcd --max-request-bytes` flag value is 1 MiB and `clientv3.Config.MaxCallRecvMsgSize` is 1 MiB. Then, server will try to send 5 separate fragmented events to the client, and the client will error with `"code = ResourceExhausted desc = grpc: received message larger than max (...)"`.
-- Add [`Watcher.RequestProgress` method](https://github.com/etcd-io/etcd/pull/9869).
- - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams.
- - Think of it as `WithProgressNotify` that can be triggered manually.
-- Fix [lease keepalive interval updates when response queue is full](https://github.com/etcd-io/etcd/pull/9952).
- - If the `<-chan *clientv3.LeaseKeepAliveResponse` from `clientv3.Lease.KeepAlive` was never consumed or the channel was full, the client was [sending a keepalive request every 500ms](https://github.com/etcd-io/etcd/issues/9911) instead of at the expected rate of once every "TTL / 3" duration.
-- Change [snapshot file permissions](https://github.com/etcd-io/etcd/pull/9977): On Linux, the snapshot file changes from readable by all (mode 0644) to readable by the user only (mode 0600).
-- Client may choose to send keepalive pings to server using [`PermitWithoutStream`](https://github.com/etcd-io/etcd/pull/10146).
- - By setting `PermitWithoutStream` to true, the client can send keepalive pings to the server without any active streams (RPCs). In other words, it allows sending keepalive pings with unary or simple RPC calls.
- - `PermitWithoutStream` is set to false by default.
-- Fix logic on [release lock key if cancelled](https://github.com/etcd-io/etcd/pull/10153) in `clientv3/concurrency` package.
-- Fix [`(*Client).Endpoints()` method race condition](https://github.com/etcd-io/etcd/pull/10595).
-- Deprecated [`grpc.ErrClientConnClosing`](https://github.com/etcd-io/etcd/pull/10981).
- - `clientv3` and `proxy/grpcproxy` no longer return `grpc.ErrClientConnClosing`.
- - `grpc.ErrClientConnClosing` has been [deprecated in gRPC >= 1.10](https://github.com/grpc/grpc-go/pull/1854).
- - Use `clientv3.IsConnCanceled(error)` or `google.golang.org/grpc/status.FromError(error)` instead.
-
-### etcdctl v3
-
-- Make [`ETCDCTL_API=3 etcdctl` default](https://github.com/etcd-io/etcd/issues/9600).
- - Now, `etcdctl set foo bar` must be `ETCDCTL_API=2 etcdctl set foo bar`.
- - Now, `ETCDCTL_API=3 etcdctl put foo bar` could be just `etcdctl put foo bar`.
-- Add [`etcdctl member add --learner` and `etcdctl member promote`](https://github.com/etcd-io/etcd/pull/10725) to add and promote raft learner member in etcd cluster.
-- Add [`etcdctl --password`](https://github.com/etcd-io/etcd/pull/9730) flag.
- - To support [`:` character in user name](https://github.com/etcd-io/etcd/issues/9691).
- - e.g. `etcdctl --user user --password password get foo`
-- Add [`etcdctl user add --new-user-password`](https://github.com/etcd-io/etcd/pull/9730) flag.
-- Add [`etcdctl check datascale`](https://github.com/etcd-io/etcd/pull/9185) command.
-- Add [`etcdctl check datascale --auto-compact, --auto-defrag`](https://github.com/etcd-io/etcd/pull/9351) flags.
-- Add [`etcdctl check perf --auto-compact, --auto-defrag`](https://github.com/etcd-io/etcd/pull/9330) flags.
-- Add [`etcdctl defrag --cluster`](https://github.com/etcd-io/etcd/pull/9390) flag.
-- Add ["raft applied index" field to `endpoint status`](https://github.com/etcd-io/etcd/pull/9176).
-- Add ["errors" field to `endpoint status`](https://github.com/etcd-io/etcd/pull/9206).
-- Add [`etcdctl endpoint health --write-out` support](https://github.com/etcd-io/etcd/pull/9540).
- - Previously, [`etcdctl endpoint health --write-out json` did not work](https://github.com/etcd-io/etcd/issues/9532).
-- Add [missing newline in `etcdctl endpoint health`](https://github.com/etcd-io/etcd/pull/10793).
-- Fix [`etcdctl watch [key] [range_end] -- [exec-command…]`](https://github.com/etcd-io/etcd/pull/9688) parsing.
- - Previously, `ETCDCTL_API=3 etcdctl watch foo -- echo watch event received` panicked.
-- Fix [`etcdctl move-leader` command for TLS-enabled endpoints](https://github.com/etcd-io/etcd/pull/9807).
-- Add [`progress` command to `etcdctl watch --interactive`](https://github.com/etcd-io/etcd/pull/9869).
- - To manually trigger broadcasting watch progress event (empty watch response with latest header) to all associated watch streams.
- - Think of it as `WithProgressNotify` that can be triggered manually.
-- Add [timeout](https://github.com/etcd-io/etcd/pull/10301) to `etcdctl snapshot save`.
- - Users can specify a timeout for the `etcdctl snapshot save` command with the `--command-timeout` flag.
-- Fix etcdctl to [strip out insecure endpoints from DNS SRV records when using discovery](https://github.com/etcd-io/etcd/pull/10443).
-
-### gRPC proxy
-
-- Fix [etcd server panic from restore operation](https://github.com/etcd-io/etcd/pull/9775).
- - Let's assume that a watcher had been requested with a future revision X and sent to node A, which became network-partitioned thereafter. Meanwhile, the cluster makes progress. Then, when the partition heals, the leader sends a snapshot to node A. Previously, if the snapshot's latest revision was still lower than the watch revision X, **the etcd server panicked** during the snapshot restore operation.
- - Especially, gRPC proxy was affected, since it detects a leader loss with a key `"proxy-namespace__lostleader"` and a watch revision `"int64(math.MaxInt64 - 2)"`.
- - Now, this server-side panic has been fixed.
-- Fix [memory leak in cache layer](https://github.com/etcd-io/etcd/pull/10327).
-- Change [gRPC proxy to expose etcd server endpoint /metrics](https://github.com/etcd-io/etcd/pull/10618).
- - Previously, the metrics exposed via the proxy were those of the proxy itself, not of the etcd server members.
-
-### gRPC gateway
-
-- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3beta` with [`/v3`](https://github.com/etcd-io/etcd/pull/9298).
- - Deprecated [`/v3alpha`](https://github.com/etcd-io/etcd/pull/9298).
- - To deprecate [`/v3beta`](https://github.com/etcd-io/etcd/issues/9189) in v3.5.
- - In v3.4, `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` still works as a fallback, but it won't work in v3.5; use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-- Add API endpoints [`/{v3beta,v3}/lease/leases, /{v3beta,v3}/lease/revoke, /{v3beta,v3}/lease/timetolive`](https://github.com/etcd-io/etcd/pull/9450).
- - To deprecate [`/{v3beta,v3}/kv/lease/leases, /{v3beta,v3}/kv/lease/revoke, /{v3beta,v3}/kv/lease/timetolive`](https://github.com/etcd-io/etcd/issues/9430) in v3.5.
-- Support [`etcd --cors`](https://github.com/etcd-io/etcd/pull/9490) in v3 HTTP requests (gRPC gateway).
-
-### Package `raft`
-
-- Fix [deadlock during PreVote migration process](https://github.com/etcd-io/etcd/pull/8525).
-- Add [`raft.ErrProposalDropped`](https://github.com/etcd-io/etcd/pull/9067).
- - Now [`(r *raft) Step` returns `raft.ErrProposalDropped`](https://github.com/etcd-io/etcd/pull/9137) if a proposal has been ignored.
- - e.g. a node is removed from cluster, or [`raftpb.MsgProp` arrives at current leader while there is an ongoing leadership transfer](https://github.com/etcd-io/etcd/issues/8975).
-- Improve [Raft `becomeLeader` and `stepLeader`](https://github.com/etcd-io/etcd/pull/9073) by keeping track of latest `pb.EntryConfChange` index.
- - Previously, a `pendingConf` boolean field was maintained by scanning the entire tail of the log, which could delay heartbeat sends.
-- Fix [missing learner nodes on `(n *node) ApplyConfChange`](https://github.com/etcd-io/etcd/pull/9116).
-- Add [`raft.Config.MaxUncommittedEntriesSize`](https://github.com/etcd-io/etcd/pull/10167) to limit the total size of the uncommitted entries in bytes (see the config sketch after this list).
- - Once exceeded, raft returns `raft.ErrProposalDropped` error.
- - Prevent [unbounded Raft log growth](https://github.com/cockroachdb/cockroach/issues/27772).
- - There was a bug in [PR#10167](https://github.com/etcd-io/etcd/pull/10167) but fixed via [PR#10199](https://github.com/etcd-io/etcd/pull/10199).
-- Add [`raft.Ready.CommittedEntries` pagination using `raft.Config.MaxSizePerMsg`](https://github.com/etcd-io/etcd/pull/9982).
- - This prevents out-of-memory errors if the raft log has become very large and commits all at once.
- - Fix [correctness bug in CommittedEntries pagination](https://github.com/etcd-io/etcd/pull/10063).
-- Optimize [message send flow control](https://github.com/etcd-io/etcd/pull/9985).
- - Leader now sends more append entries if it has more non-empty entries to send after updating flow control information.
- - Now, Raft allows multiple in-flight append messages.
-- Optimize [memory allocation when boxing slice in `maybeCommit`](https://github.com/etcd-io/etcd/pull/10679).
- - By boxing a heap-allocated slice header instead of the slice header on the stack, we can avoid an allocation when passing through the sort.Interface interface.
-- Avoid [memory allocation in Raft entry `String` method](https://github.com/etcd-io/etcd/pull/10680).
-- Avoid [multiple memory allocations when merging stable and unstable log](https://github.com/etcd-io/etcd/pull/10684).
-- Extract [progress tracking into own component](https://github.com/etcd-io/etcd/pull/10683).
- - Add [package `raft/tracker`](https://github.com/etcd-io/etcd/pull/10807).
- - Optimize [string representation of `Progress`](https://github.com/etcd-io/etcd/pull/10882).
-- Make [relationship between `node` and `RawNode` explicit](https://github.com/etcd-io/etcd/pull/10803).
-- Prevent [learners from becoming leader](https://github.com/etcd-io/etcd/pull/10822).
-- Add [package `raft/quorum` to reason about committed indexes as well as vote outcomes for both majority and joint quorums](https://github.com/etcd-io/etcd/pull/10779).
- - Bundle [Voters and Learner into `raft/tracker.Config` struct](https://github.com/etcd-io/etcd/pull/10865).
-- Use [membership sets in progress tracking](https://github.com/etcd-io/etcd/pull/10779).
-- Implement [joint quorum computation](https://github.com/etcd-io/etcd/pull/10779).
-- Refactor [`raft/node.go` to centralize configuration change application](https://github.com/etcd-io/etcd/pull/10865).
-- Allow [voter to become learner through snapshot](https://github.com/etcd-io/etcd/pull/10864).
-- Add [package `raft/confchange` to internally support joint consensus](https://github.com/etcd-io/etcd/pull/10779).
-- Use [`RawNode` for node's event loop](https://github.com/etcd-io/etcd/pull/10892).
-- Add [`RawNode.Bootstrap` method](https://github.com/etcd-io/etcd/pull/10892).
-- Add [`raftpb.ConfChangeV2` to use joint quorums](https://github.com/etcd-io/etcd/pull/10914).
- - `raftpb.ConfChange` continues to work as today: it allows carrying out a single configuration change. A `pb.ConfChange` proposal gets added to the Raft log as such and is thus also observed by the app during Ready handling, and fed back to ApplyConfChange.
- - `raftpb.ConfChangeV2` allows joint configuration changes but will continue to carry out configuration changes in "one phase" (i.e. without ever entering a joint config) when this is possible.
- - `raftpb.ConfChangeV2` messages initiate configuration changes. They support both the simple "one at a time" membership change protocol and full Joint Consensus allowing for arbitrary changes in membership.
-- Change [`raftpb.ConfState.Nodes` to `raftpb.ConfState.Voters`](https://github.com/etcd-io/etcd/pull/10914).
-- Allow [learners to vote, but still learners do not count in quorum](https://github.com/etcd-io/etcd/pull/10998).
- - This is necessary when a learner has been promoted (i.e. is now a voter) but has not yet learned about the promotion.
-- Fix [restoring joint consensus](https://github.com/etcd-io/etcd/pull/11003).
-- Visit [`Progress` in stable order](https://github.com/etcd-io/etcd/pull/11004).
-- Proactively [probe newly added followers](https://github.com/etcd-io/etcd/pull/11037).
- - The general expectation in `tracker.Progress.Next == c.LastIndex` is that the follower has no log at all (and will thus likely need a snapshot), though the app may have applied a snapshot out of band before adding the replica (thus making the first index the better choice).
- - Previously, when the leader applied a new configuration that added voters, it would not immediately probe these voters, delaying when they would be caught up.
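-
-A minimal sketch, not taken from etcd itself, of how an application built on the `go.etcd.io/etcd/raft` package might react to the new `raft.ErrProposalDropped` error; `proposeOnce` is a hypothetical helper name:
-
-```go
-package app
-
-import (
-    "context"
-
-    "go.etcd.io/etcd/raft"
-)
-
-// proposeOnce submits one proposal and reports whether the caller should
-// retry later because the proposal was dropped (e.g. the node left the
-// cluster or a leadership transfer is in progress).
-func proposeOnce(n raft.Node, data []byte) (retryLater bool, err error) {
-    switch err := n.Propose(context.TODO(), data); err {
-    case nil:
-        return false, nil
-    case raft.ErrProposalDropped:
-        return true, nil
-    default:
-        return false, err
-    }
-}
-```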
-
-### Package `wal`
-
-- Add [`Verify` function to perform corruption check on WAL contents](https://github.com/etcd-io/etcd/pull/10603).
-- Fix [`wal` directory cleanup on creation failures](https://github.com/etcd-io/etcd/pull/10689).
-
-### Tooling
-
-- Add [`etcd-dump-logs --entry-type`](https://github.com/etcd-io/etcd/pull/9628) flag to support WAL log filtering by entry type.
-- Add [`etcd-dump-logs --stream-decoder`](https://github.com/etcd-io/etcd/pull/9790) flag to support custom decoder.
-- Add [`SHA256SUMS`](https://github.com/etcd-io/etcd/pull/11087) file to release assets.
- - etcd maintainers are a distributed team; this change allows releases to be cut and validated without requiring a signing key.
-
-### Go
-
-- Require [*Go 1.12+*](https://github.com/etcd-io/etcd/pull/10045).
-- Compile with [*Go 1.12.9*](https://golang.org/doc/devel/release.html#go1.12) including [*Go 1.12.8*](https://groups.google.com/d/msg/golang-announce/65QixT3tcmg/DrFiG6vvCwAJ) security fixes.
-
-### Dockerfile
-
-- [Rebase etcd image from Alpine to Debian](https://github.com/etcd-io/etcd/pull/10805) to improve security and maintenance effort for etcd release.
-
-
-
diff --git a/CHANGELOG/CHANGELOG-3.5.md b/CHANGELOG/CHANGELOG-3.5.md
deleted file mode 100644
index 5919842ff74..00000000000
--- a/CHANGELOG/CHANGELOG-3.5.md
+++ /dev/null
@@ -1,467 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.4](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.4.md).
-
-
-
-## v3.5.8 (TBD)
-
-### Package `netutil`
-- Fix [consistently format IPv6 addresses for comparison](https://github.com/etcd-io/etcd/pull/15187)
-
-### Dependency
-- Bump bbolt to [v1.3.7](https://github.com/etcd-io/etcd/pull/15222).
-
-### Other
-- [Remove nsswitch.conf from docker image](https://github.com/etcd-io/etcd/pull/15161)
-
-
-
-## v3.5.7 (2023-01-20)
-
-### etcd server
-- Fix [Remove memberID from data corrupt alarm](https://github.com/etcd-io/etcd/pull/14852).
-- Fix [Allow non-mutating requests to pass through quotaKVServer when NOSPACE](https://github.com/etcd-io/etcd/pull/14884).
-- Fix [nil pointer panic for readonly txn due to nil response](https://github.com/etcd-io/etcd/pull/14899).
-- Fix [The last record which was partially synced to disk isn't automatically repaired](https://github.com/etcd-io/etcd/pull/15069).
-- Fix [etcdserver might promote a non-started learner](https://github.com/etcd-io/etcd/pull/15096).
-
-### Package `clientv3`
-- Reverted the fix to [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14995).
-
-### Security
-- Use [distroless base image](https://github.com/etcd-io/etcd/pull/15016) to address critical Vulnerabilities.
-- Updated [base image from base-debian11 to static-debian11 and removed dependency on busybox](https://github.com/etcd-io/etcd/pull/15037).
-- Bumped [some dependencies](https://github.com/etcd-io/etcd/pull/15018) to address some HIGH Vulnerabilities.
-
-### Go
-- Require [Go 1.17+](https://github.com/etcd-io/etcd/pull/15019).
-- Compile with [Go 1.17+](https://go.dev/doc/devel/release#go1.17)
-
-
-
-## v3.5.6 (2022-11-21)
-
-### etcd server
-- Fix [auth invalid token and old revision errors in watch](https://github.com/etcd-io/etcd/pull/14547)
-- Fix [avoid closing a watch with ID 0 incorrectly](https://github.com/etcd-io/etcd/pull/14563)
-- Fix [auth: fix data consistency issue caused by recovery from snapshot](https://github.com/etcd-io/etcd/pull/14648)
-- Fix [revision might be inconsistent between members when etcd crashes during a defragmentation operation](https://github.com/etcd-io/etcd/pull/14733)
-- Fix [timestamp in inconsistent format](https://github.com/etcd-io/etcd/pull/14799)
-- Fix [Failed resolving host due to lost DNS record](https://github.com/etcd-io/etcd/pull/14573)
-
-### Package `clientv3`
-- Fix [Add backoff before retry when watch stream returns unavailable](https://github.com/etcd-io/etcd/pull/14582).
-- Fix [stack overflow error in double barrier](https://github.com/etcd-io/etcd/pull/14658)
-- Fix [Refreshing token on CommonName based authentication causes segmentation violation in client](https://github.com/etcd-io/etcd/pull/14790).
-
-### etcd grpc-proxy
-- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14500) flag to support adding configurable cipher list.
-
-
-
-## v3.5.5 (2022-09-15)
-
-### Deprecations
-- Deprecated [SetKeepAlive and SetKeepAlivePeriod in limitListenerConn](https://github.com/etcd-io/etcd/pull/14366).
-
-### Package `clientv3`
-- Fix [do not overwrite authTokenBundle on dial](https://github.com/etcd-io/etcd/pull/14132).
-- Fix [IsOptsWithPrefix returns false even if WithPrefix() is included](https://github.com/etcd-io/etcd/pull/14187).
-
-### etcd server
-- [Build official darwin/arm64 artifacts](https://github.com/etcd-io/etcd/pull/14436).
-- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14219) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32.
-- Add [`etcd --experimental-compact-hash-check-enabled --experimental-compact-hash-check-time`](https://github.com/etcd-io/etcd/issues/14039) flags to support enabling reliable corruption detection on compacted revisions.
-- Fix [unexpected error during txn](https://github.com/etcd-io/etcd/issues/14110).
-- Fix [lease leak issue due to tokenProvider isn't enabled when restoring auth store from a snapshot](https://github.com/etcd-io/etcd/pull/13205).
-- Fix [the race condition between goroutine and channel on the same leases to be revoked](https://github.com/etcd-io/etcd/pull/14087).
-- Fix [lessor may continue to schedule checkpoint after stepping down leader role](https://github.com/etcd-io/etcd/pull/14087).
-- Fix [Restrict the max size of each WAL entry to the remaining size of the WAL file](https://github.com/etcd-io/etcd/pull/14127).
-- Fix [Protect rangePermCache with a RW lock correctly](https://github.com/etcd-io/etcd/pull/14227)
-- Fix [memberID equals zero in corruption alarm](https://github.com/etcd-io/etcd/pull/14272)
-- Fix [Durability API guarantee broken in single node cluster](https://github.com/etcd-io/etcd/pull/14424)
-- Fix [etcd fails to start after performing alarm list operation and then power off/on](https://github.com/etcd-io/etcd/pull/14429)
-- Fix [authentication data not loaded on member startup](https://github.com/etcd-io/etcd/pull/14409)
-
-### etcdctl v3
-
-- Fix [etcdctl move-leader may fail for multiple endpoints](https://github.com/etcd-io/etcd/pull/14434)
-
-
-### Other
-- [Bump golang.org/x/crypto to latest version](https://github.com/etcd-io/etcd/pull/13996) to address [CVE-2022-27191](https://github.com/advisories/GHSA-8c26-wmh5-6g9v).
-- [Bump OpenTelemetry to 1.0.1 and gRPC to 1.41.0](https://github.com/etcd-io/etcd/pull/14312).
-
-
-
-## v3.5.4 (2022-04-24)
-
-### etcd server
-- Fix [etcd panic on startup (auth enabled)](https://github.com/etcd-io/etcd/pull/13946)
-
-### package `client/pkg/v3`
-
-- [Revert the change of trimming the trailing dot from SRV.Target](https://github.com/etcd-io/etcd/pull/13950) returned by DNS lookup
-
-
-
-
-## v3.5.3 (2022-04-13)
-
-### etcd server
-- Fix [Provide a better liveness probe for when etcd runs as a Kubernetes pod](https://github.com/etcd-io/etcd/pull/13706)
-- Fix [inconsistent log format](https://github.com/etcd-io/etcd/pull/13864)
-- Fix [Inconsistent revision and data occurs](https://github.com/etcd-io/etcd/pull/13908)
-- Fix [Etcdserver is still in progress of processing LeaseGrantRequest when it receives a LeaseKeepAliveRequest on the same leaseID](https://github.com/etcd-io/etcd/pull/13932)
-- Fix [consistent_index coming from snapshot is overwritten by the old local value](https://github.com/etcd-io/etcd/pull/13933)
-- [Update container base image snapshot](https://github.com/etcd-io/etcd/pull/13862)
-- Fix [Defrag unsets backend options](https://github.com/etcd-io/etcd/pull/13701).
-
-### package `client/pkg/v3`
-
-- [Trim the suffix dot from the target](https://github.com/etcd-io/etcd/pull/13714) in SRV records returned by DNS lookup
-
-### etcdctl v3
-
-- [Always print the raft_term in decimal](https://github.com/etcd-io/etcd/pull/13727) when displaying member list in json.
-
-
-
-## [v3.5.2](https://github.com/etcd-io/etcd/releases/tag/v3.5.2) (2022-02-01)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.1...v3.5.2) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes.
-
-### etcd server
-- Fix [exclude the same alarm type activated by multiple peers](https://github.com/etcd-io/etcd/pull/13476).
-- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to enable checkpoint persisting.
-- Fix [Lease checkpoints don't prevent TTL reset on leader change](https://github.com/etcd-io/etcd/pull/13508); requires enabling checkpoint persisting.
-- Fix [assertion failed due to tx closed when recovering v3 backend from a snapshot db](https://github.com/etcd-io/etcd/pull/13501)
-- Fix [segmentation violation(SIGSEGV) error due to premature unlocking of watchableStore](https://github.com/etcd-io/etcd/pull/13541)
-
-
-
-## [v3.5.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.1) (2021-10-15)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.5.1) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes.
-
-### etcd server
-
-- Fix [self-signed-cert-validity parameter cannot be specified in the config file](https://github.com/etcd-io/etcd/pull/13237).
-- Fix [ensure that cluster members stored in v2store and backend are in sync](https://github.com/etcd-io/etcd/pull/13348)
-
-### etcd client
-
-- [Fix etcd client sends invalid :authority header](https://github.com/etcd-io/etcd/issues/13192)
-
-### package clientv3
-
-- Endpoints now self-identify as `etcd-endpoints://{id}/{authority}` where authority is based on the first endpoint passed, for example `etcd-endpoints://0xc0009d8540/localhost:2079`
-
-### Other
-
-- Updated [base image](https://github.com/etcd-io/etcd/pull/13386) from `debian:buster-v1.4.0` to `debian:bullseye-20210927` to fix the following critical CVEs:
- - [CVE-2021-3711](https://nvd.nist.gov/vuln/detail/CVE-2021-3711): miscalculation of a buffer size in openssl's SM2 decryption
- - [CVE-2021-35942](https://nvd.nist.gov/vuln/detail/CVE-2021-35942): integer overflow flaw in glibc
- - [CVE-2019-9893](https://nvd.nist.gov/vuln/detail/CVE-2019-9893): incorrect syscall argument generation in libseccomp
- - [CVE-2021-36159](https://nvd.nist.gov/vuln/detail/CVE-2021-36159): libfetch in apk-tools mishandles numeric strings in FTP and HTTP protocols to allow out of bound reads.
-
-
-
-## v3.5.0 (2021-06)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0) and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/) for any breaking changes.
-
-- [v3.5.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0) (2021 TBD), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.1...v3.5.0).
-- [v3.5.0-rc.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.1) (2021-06-10), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-rc.0...v3.5.0-rc.1).
-- [v3.5.0-rc.0](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-rc.0) (2021-06-04), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.4...v3.5.0-rc.0).
-- [v3.5.0-beta.4](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.4) (2021-05-26), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.3...v3.5.0-beta.4).
-- [v3.5.0-beta.3](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.3) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.2...v3.5.0-beta.3).
-- [v3.5.0-beta.2](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.2) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0-beta.1...v3.5.0-beta.2).
-- [v3.5.0-beta.1](https://github.com/etcd-io/etcd/releases/tag/v3.5.0-beta.1) (2021-05-18), see [code changes](https://github.com/etcd-io/etcd/compare/v3.4.0...v3.5.0-beta.1).
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v3.5 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_5/).**
-
-### Breaking Changes
-
-- `go.etcd.io/etcd` Go packages have moved to `go.etcd.io/etcd/{api,pkg,raft,client,etcdctl,server,tests}/v3` to follow the [Go modules](https://github.com/golang/go/wiki/Modules) conventions; see the import sketch after this list.
-- The `go.etcd.io/clientv3/snapshot` SnapshotManager has moved to `go.etcd.io/clientv3/etcdctl`.
- The method `snapshot.Save` to download a snapshot from the remote server was preserved in `go.etcd.io/clientv3/snapshot`.
-- The `go.etcd.io/client` package was migrated to `go.etcd.io/client/v2`.
-- Changed behavior of clientv3 API [MemberList](https://github.com/etcd-io/etcd/pull/11639).
- - Previously, it was served directly from the server's local data, which could be stale.
- - Now, it is served with a linearizable guarantee. If the server is disconnected from quorum, the `MemberList` call will fail.
-- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports [`/v3`](TODO) endpoint.
- - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298).
- - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` doesn't work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-- **`etcd --experimental-enable-v2v3` flag remains experimental and to be deprecated.**
- - v2 storage emulation feature will be deprecated in the next release.
- - etcd 3.5 is the last version that supports V2 API. Flags `--enable-v2` and `--experimental-enable-v2v3` [are now deprecated](https://github.com/etcd-io/etcd/pull/12940) and will be removed in etcd v3.6 release.
-- **`etcd --experimental-backend-bbolt-freelist-type` flag has been deprecated.** Use **`etcd --backend-bbolt-freelist-type`** instead. The default type is hashmap and it is stable now.
-- **`etcd --debug` flag has been deprecated.** Use **`etcd --log-level=debug`** instead.
-- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947).
-- **`etcd --log-output` flag has been deprecated.** Use **`etcd --log-outputs`** instead.
-- **`etcd --logger=zap --log-outputs=stderr`** is now the default.
-- **`etcd --logger=capnslog` flag value has been deprecated.**
-- **`etcd --logger=zap --log-outputs=default` flag value is not supported.**
- - Use `etcd --logger=zap --log-outputs=stderr`.
- - Or, use `etcd --logger=zap --log-outputs=systemd/journal` to send logs to the local systemd journal.
- - Previously, if the etcd parent process ID (PPID) was 1 (e.g. run with systemd), `etcd --logger=capnslog --log-outputs=default` redirected server logs to the local systemd journal. If the write to journald failed, it wrote to `os.Stderr` as a fallback.
- - However, even with PPID 1, it can fail to dial systemd journal (e.g. run embedded etcd with Docker container). Then, [every single log write will fail](https://github.com/etcd-io/etcd/pull/9729) and fall back to `os.Stderr`, which is inefficient.
- - To avoid this problem, systemd journal logging must be configured manually.
-- **`etcd --log-outputs=stderr`** is now the default.
-- **`etcd --log-package-levels` flag for `capnslog` has been deprecated.** Now, **`etcd --logger=zap --log-outputs=stderr`** is the default.
-- **`[CLIENT-URL]/config/local/log` endpoint has been deprecated, as has the `etcd --log-package-levels` flag.**
- - `curl http://127.0.0.1:2379/config/local/log -XPUT -d '{"Level":"DEBUG"}'` won't work.
- - Please use `etcd --logger=zap --log-outputs=stderr` instead.
-- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead.
-- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead.
-- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead.
-- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead.
-- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead.
-- Main branch `/version` outputs `3.5.0-pre`, instead of `3.4.0+git`.
-- Changed `proxy` package function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11614).
- - Previously, `NewClusterProxy(c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`, now `NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{})`.
- - Previously, `Register(c *clientv3.Client, prefix string, addr string, ttl int)`, now `Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{}`.
- - Previously, `NewHandler(t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`, now `NewHandler(lg *zap.Logger, t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler`.
-- Changed `pkg/flags` function signature to [support structured logger](https://github.com/etcd-io/etcd/pull/11616).
- - Previously, `SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error`, now `SetFlagsFromEnv(lg *zap.Logger, prefix string, fs *flag.FlagSet) error`.
- - Previously, `SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error`, now `SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error`.
-- ClientV3 supports [grpc resolver API](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go).
- - Endpoints can be managed using [endpoints.Manager](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/endpoints/endpoints.go)
- - Previously supported [GRPCResolver was decomissioned](https://github.com/etcd-io/etcd/pull/12675). Use [resolver](https://github.com/etcd-io/etcd/blob/main/client/v3/naming/resolver/resolver.go) instead.
-- Turned on [--pre-vote by default](https://github.com/etcd-io/etcd/pull/12770). This should prevent an individual member from disrupting the raft leader.
-- [ETCD_CLIENT_DEBUG env](https://github.com/etcd-io/etcd/pull/12786): Now supports log levels (debug, info, warn, error, dpanic, panic, fatal). Only when set, overrides application-wide grpc logging settings.
-- [Embed Etcd.Close()](https://github.com/etcd-io/etcd/pull/12828) needs to be called exactly once, and it closes the Etcd.Err() stream.
-- [Embed Etcd does not override global/grpc logger](https://github.com/etcd-io/etcd/pull/12861) by default any longer. If desired, please call `embed.Config::SetupGlobalLoggers()` explicitly.
-- [Embed Etcd custom logger should be configured using simpler builder `NewZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/12973).
-- Client errors of `context cancelled` or `context deadline exceeded` are exposed as `codes.Canceled` and `codes.DeadlineExceeded`, instead of `codes.Unknown`.
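-
-A minimal sketch, assuming a client program that has migrated to the modularized import paths above (`go.etcd.io/etcd/client/v3` for the v3 client); the endpoint, key, and value are illustrative only:
-
-```go
-package main
-
-import (
-    "context"
-    "fmt"
-    "time"
-
-    clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-func main() {
-    // Dial the cluster using the new module path for clientv3.
-    cli, err := clientv3.New(clientv3.Config{
-        Endpoints:   []string{"localhost:2379"},
-        DialTimeout: 5 * time.Second,
-    })
-    if err != nil {
-        panic(err)
-    }
-    defer cli.Close()
-
-    if _, err := cli.Put(context.TODO(), "foo", "bar"); err != nil {
-        panic(err)
-    }
-    fmt.Println("put ok")
-}
-```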
-
-
-### Storage format changes
-- [WAL log's snapshots persists raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12735)
-- [Backend persists raftpb.ConfState](https://github.com/etcd-io/etcd/pull/12962) in the `meta` bucket `confState` key.
-- [Backend persists applied term](https://github.com/etcd-io/etcd/pull/) in the `meta` bucket.
-- Backend persists `downgrade` in the `cluster` bucket
-
-### Security
-
-- Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864).
-- Changed [the format of WAL entries related to auth for not keeping password as a plain text](https://github.com/etcd-io/etcd/pull/11943).
-- Add third party [Security Audit Report](https://github.com/etcd-io/etcd/pull/12201).
-- A [log warning](https://github.com/etcd-io/etcd/pull/12242) is added when etcd uses any existing directory that has a permission different than 700 on Linux and 777 on Windows.
-- Add optional [`ClientCertFile` and `ClientKeyFile`](https://github.com/etcd-io/etcd/pull/12705) options for peer and client tls configuration when split certificates are used.
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-Note that any `etcd_debugging_*` metrics are experimental and subject to change.
-
-- Deprecated `etcd_debugging_mvcc_db_total_size_in_bytes` Prometheus metric. Use `etcd_mvcc_db_total_size_in_bytes` instead.
-- Deprecated `etcd_debugging_mvcc_put_total` Prometheus metric. Use `etcd_mvcc_put_total` instead.
-- Deprecated `etcd_debugging_mvcc_delete_total` Prometheus metric. Use `etcd_mvcc_delete_total` instead.
-- Deprecated `etcd_debugging_mvcc_txn_total` Prometheus metric. Use `etcd_mvcc_txn_total` instead.
-- Deprecated `etcd_debugging_mvcc_range_total` Prometheus metric. Use `etcd_mvcc_range_total` instead.
-- Add [`etcd_debugging_mvcc_current_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-- Add [`etcd_debugging_mvcc_compact_revision`](https://github.com/etcd-io/etcd/pull/11126) Prometheus metric.
-- Change [`etcd_cluster_version`](https://github.com/etcd-io/etcd/pull/11254) Prometheus metrics to include only major and minor version.
-- Add [`etcd_debugging_mvcc_total_put_size_in_bytes`](https://github.com/etcd-io/etcd/pull/11374) Prometheus metric.
-- Add [`etcd_server_client_requests_total` with `"type"` and `"client_api_version"` labels](https://github.com/etcd-io/etcd/pull/11687).
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3).
-- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
-- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13395).
-
-### etcd server
-
-- Add [don't attempt to grant nil permission to a role](https://github.com/etcd-io/etcd/pull/13086).
-- Add [don't activate alarms w/missing AlarmType](https://github.com/etcd-io/etcd/pull/13084).
-- Add [`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256` and `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` to `etcd --cipher-suites`](https://github.com/etcd-io/etcd/pull/11864).
-- Automatically [create parent directory if it does not exist](https://github.com/etcd-io/etcd/pull/9626) (fix [issue#9609](https://github.com/etcd-io/etcd/issues/9609)).
-- v4.0 will configure `etcd --enable-v2=true --enable-v2v3=/aaa` to enable v2 API server that is backed by **v3 storage**.
-- [`etcd --backend-bbolt-freelist-type`] flag is now stable.
- - `etcd --experimental-backend-bbolt-freelist-type` has been deprecated.
-- Support [downgrade API](https://github.com/etcd-io/etcd/pull/11715).
-- Deprecate v2 apply on cluster version. [Use v3 request to set cluster version and recover cluster version from v3 backend](https://github.com/etcd-io/etcd/pull/11427).
-- [Use v2 api to update cluster version to support mixed version cluster during upgrade](https://github.com/etcd-io/etcd/pull/12988).
-- [Fix corruption bug in defrag](https://github.com/etcd-io/etcd/pull/11613).
-- Fix [quorum protection logic when promoting a learner](https://github.com/etcd-io/etcd/pull/11640).
-- Improve [peer corruption checker](https://github.com/etcd-io/etcd/pull/11621) to work when peer mTLS is enabled.
-- Log [`[CLIENT-PORT]/health` check in server side](https://github.com/etcd-io/etcd/pull/11704).
-- Log [successful etcd server-side health check in debug level](https://github.com/etcd-io/etcd/pull/12677).
-- Improve [compaction performance when latest index is greater than 1-million](https://github.com/etcd-io/etcd/pull/11734).
-- [Refactor consistentindex](https://github.com/etcd-io/etcd/pull/11699).
-- [Add log when etcdserver failed to apply command](https://github.com/etcd-io/etcd/pull/11670).
-- Improve [count-only range performance](https://github.com/etcd-io/etcd/pull/11771).
-- Remove [redundant storage restore operation to shorten the startup time](https://github.com/etcd-io/etcd/pull/11779).
- - With 40 million keys of test data, it can shorten the startup time from 5 min to 2.5 min.
-- [Fix deadlock bug in mvcc](https://github.com/etcd-io/etcd/pull/11817).
-- Fix [inconsistency between WAL and server snapshot](https://github.com/etcd-io/etcd/pull/11888).
- - Previously, server restore fails if it had crashed after persisting raft hard state but before saving snapshot.
- - See https://github.com/etcd-io/etcd/issues/10219 for more.
-- Add [missing CRC checksum check in WAL validate method otherwise causes panic](https://github.com/etcd-io/etcd/pull/11924).
- - See https://github.com/etcd-io/etcd/issues/11918.
-- Improve logging around snapshot send and receive.
-- [Push down RangeOptions.limit argv into index tree to reduce memory overhead](https://github.com/etcd-io/etcd/pull/11990).
-- Add [reason field for /health response](https://github.com/etcd-io/etcd/pull/11983).
-- Add [exclude alarms from health check conditionally](https://github.com/etcd-io/etcd/pull/12880).
-- Add [`etcd --unsafe-no-fsync`](https://github.com/etcd-io/etcd/pull/11946) flag.
- - Setting the flag disables all uses of fsync, which is unsafe and will cause data loss. This flag makes it possible to run an etcd node for testing and development without placing lots of load on the file system.
-- Add [`etcd --auth-token-ttl`](https://github.com/etcd-io/etcd/pull/11980) flag to customize `simpleTokenTTL` settings.
-- Improve [`runtime.FDUsage` call pattern to reduce objects malloc of Memory Usage and CPU Usage](https://github.com/etcd-io/etcd/pull/11986).
-- Improve [mvcc.watchResponse channel Memory Usage](https://github.com/etcd-io/etcd/pull/11987).
-- Log [expensive request info in UnaryInterceptor](https://github.com/etcd-io/etcd/pull/12086).
-- [Fix invalid Go type in etcdserverpb](https://github.com/etcd-io/etcd/pull/12000).
-- [Improve healthcheck by using v3 range request and its corresponding timeout](https://github.com/etcd-io/etcd/pull/12195).
-- Add [`etcd --experimental-watch-progress-notify-interval`](https://github.com/etcd-io/etcd/pull/12216) flag to make watch progress notify interval configurable.
-- Fix [server panic in slow writes warnings](https://github.com/etcd-io/etcd/issues/12197).
- - Fixed via [PR#12238](https://github.com/etcd-io/etcd/pull/12238).
-- [Fix server panic](https://github.com/etcd-io/etcd/pull/12288) when force-new-cluster flag is enabled in a cluster which had learner node.
-- Add [`etcd --self-signed-cert-validity`](https://github.com/etcd-io/etcd/pull/12429) flag to support setting certificate expiration time.
- - Note that certificates generated by etcd are valid for 1 year by default when the auto-tls or peer-auto-tls option is specified.
-- Add [`etcd --experimental-warning-apply-duration`](https://github.com/etcd-io/etcd/pull/12448) flag which allows apply duration threshold to be configurable.
-- Add [`etcd --experimental-memory-mlock`](https://github.com/etcd-io/etcd/pull/TODO) flag which prevents etcd memory pages from being swapped out.
-- Add [`etcd --socket-reuse-port`](https://github.com/etcd-io/etcd/pull/12702) flag
- - Setting this flag enables `SO_REUSEPORT` which allows rebind of a port already in use. User should take caution when using this flag to ensure flock is properly enforced.
-- Add [`etcd --socket-reuse-address`](https://github.com/etcd-io/etcd/pull/12702) flag
- - Setting this flag enables `SO_REUSEADDR` which allows binding to an address in `TIME_WAIT` state, improving etcd restart time.
-- Reduce [around 30% memory allocation by logging range response size without marshal](https://github.com/etcd-io/etcd/pull/12871).
-- The `ETCD_VERIFY="all"` environment variable triggers [additional verification of consistency](https://github.com/etcd-io/etcd/pull/12901) of etcd data-dir files.
-- Add [`etcd --enable-log-rotation`](https://github.com/etcd-io/etcd/pull/12774) boolean flag which enables log rotation if true.
-- Add [`etcd --log-rotation-config-json`](https://github.com/etcd-io/etcd/pull/12774) flag which allows passthrough of JSON config to configure log rotation for a file output target.
-- Add experimental distributed tracing boolean flag [`--experimental-enable-distributed-tracing`](https://github.com/etcd-io/etcd/pull/12919) which enables tracing.
-- Add [`etcd --experimental-distributed-tracing-address`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows configuring the OpenTelemetry collector address.
-- Add [`etcd --experimental-distributed-tracing-service-name`](https://github.com/etcd-io/etcd/pull/12919) string flag which allows changing the default "etcd" service name.
-- Add [`etcd --experimental-distributed-tracing-instance-id`](https://github.com/etcd-io/etcd/pull/12919) string flag which configures an instance ID, which must be unique per etcd instance.
-- Add [`--experimental-bootstrap-defrag-threshold-megabytes`](https://github.com/etcd-io/etcd/pull/12941), which configures a threshold for the unused db size; etcdserver will automatically perform defragmentation on bootstrap when the unused size exceeds this value. The functionality is disabled if the value is 0.
-
-### Package `runtime`
-
-- Optimize [`runtime.FDUsage` by removing unnecessary sorting](https://github.com/etcd-io/etcd/pull/12214).
-
-### Package `embed`
-
-- Remove [`embed.Config.Debug`](https://github.com/etcd-io/etcd/pull/10947).
- - Use `embed.Config.LogLevel` instead; see the sketch after this list.
-- Add [`embed.Config.ZapLoggerBuilder`](https://github.com/etcd-io/etcd/pull/11147) to allow creating a custom zap logger.
-- Replace [global `*zap.Logger` with etcd server logger object](https://github.com/etcd-io/etcd/pull/12212).
-- Add [`embed.Config.EnableLogRotation`](https://github.com/etcd-io/etcd/pull/12774) which enables log rotation if true.
-- Add [`embed.Config.LogRotationConfigJSON`](https://github.com/etcd-io/etcd/pull/12774) to allow passthrough of JSON config to configure log rotation for a file output target.
-- Add [`embed.Config.ExperimentalEnableDistributedTracing`](https://github.com/etcd-io/etcd/pull/12919) which enables experimental distributed tracing if true.
-- Add [`embed.Config.ExperimentalDistributedTracingAddress`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default collector address.
-- Add [`embed.Config.ExperimentalDistributedTracingServiceName`](https://github.com/etcd-io/etcd/pull/12919) which allows overriding default "etcd" service name.
-- Add [`embed.Config.ExperimentalDistributedTracingServiceInstanceID`](https://github.com/etcd-io/etcd/pull/12919) which allows configuring an instance ID, which must be unique per etcd instance.
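-
-A minimal sketch, assuming the `go.etcd.io/etcd/server/v3/embed` package, of the points above: `embed.Config.LogLevel` replaces the removed `Debug` field and `Etcd.Close()` is called exactly once; the data directory and log level are illustrative:
-
-```go
-package main
-
-import (
-    "log"
-
-    "go.etcd.io/etcd/server/v3/embed"
-)
-
-func main() {
-    cfg := embed.NewConfig()
-    cfg.Dir = "default.etcd"
-    cfg.LogLevel = "debug" // instead of the removed embed.Config.Debug
-
-    e, err := embed.StartEtcd(cfg)
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer e.Close() // must be called exactly once; also closes e.Err()
-
-    <-e.Server.ReadyNotify()
-    log.Println("embedded etcd is ready")
-}
-```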
-
-### Package `clientv3`
-
-- Remove [excessive watch cancel logging messages](https://github.com/etcd-io/etcd/pull/12187).
- - See [kubernetes/kubernetes#93450](https://github.com/kubernetes/kubernetes/issues/93450).
-- Add [`TryLock`](https://github.com/etcd-io/etcd/pull/11104) method to `clientv3/concurrency/Mutex`. A non-blocking method on `Mutex` that does not wait to acquire the lock and returns immediately if the Mutex is locked by another session; see the sketch after this list.
-- Fix [client balancer failover against multiple endpoints](https://github.com/etcd-io/etcd/pull/11184).
- - Fix [`"kube-apiserver: failover on multi-member etcd cluster fails certificate check on DNS mismatch"`](https://github.com/kubernetes/kubernetes/issues/83028).
-- Fix [IPv6 endpoint parsing in client](https://github.com/etcd-io/etcd/pull/11211).
- - Fix ["1.16: etcd client does not parse IPv6 addresses correctly when members are joining" (kubernetes#83550)](https://github.com/kubernetes/kubernetes/issues/83550).
-- Fix [errors caused by grpc changing balancer/resolver API](https://github.com/etcd-io/etcd/pull/11564). This change is compatible with grpc >= [v1.26.0](https://github.com/grpc/grpc-go/releases/tag/v1.26.0), but is not compatible with < v1.26.0 version.
-- Use [ServerName as the authority](https://github.com/etcd-io/etcd/pull/11574) after bumping to grpc v1.26.0. Remove workaround in [#11184](https://github.com/etcd-io/etcd/pull/11184).
-- Fix [`"hasleader"` metadata embedding](https://github.com/etcd-io/etcd/pull/11687).
- - Previously, `clientv3.WithRequireLeader(ctx)` was overwriting existing context keys.
-- Fix [watch leak caused by lazy cancellation](https://github.com/etcd-io/etcd/pull/11850). When clients cancel their watches, a cancel request will now be immediately sent to the server instead of waiting for the next watch event.
-- Make sure [save snapshot downloads checksum for integrity checks](https://github.com/etcd-io/etcd/pull/11896).
-- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready.
-- Improve [clientv3:get AuthToken gracefully without extra connection](https://github.com/etcd-io/etcd/pull/12165).
-- Changed [clientv3 dialing code](https://github.com/etcd-io/etcd/pull/12671) to use grpc resolver API instead of custom balancer.
- - Endpoints self identify now as `etcd-endpoints://{id}/#initially={list of endpoints}` e.g. `etcd-endpoints://0xc0009d8540/#initially=[localhost:2079]`
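-
-A minimal sketch, assuming an existing `*clientv3.Client` named `cli`, of the non-blocking `TryLock` described above; the lock prefix is illustrative:
-
-```go
-package app
-
-import (
-    "context"
-
-    clientv3 "go.etcd.io/etcd/client/v3"
-    "go.etcd.io/etcd/client/v3/concurrency"
-)
-
-func tryWork(cli *clientv3.Client) error {
-    s, err := concurrency.NewSession(cli)
-    if err != nil {
-        return err
-    }
-    defer s.Close()
-
-    m := concurrency.NewMutex(s, "/my-lock/")
-    switch err := m.TryLock(context.TODO()); err {
-    case nil:
-        defer m.Unlock(context.TODO())
-        // ... do work while holding the lock ...
-        return nil
-    case concurrency.ErrLocked:
-        // Held by another session; return immediately instead of blocking.
-        return nil
-    default:
-        return err
-    }
-}
-```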
-
-### Package `lease`
-
-- Fix [memory leak in follower nodes](https://github.com/etcd-io/etcd/pull/11731).
- - https://github.com/etcd-io/etcd/issues/11495
- - https://github.com/etcd-io/etcd/issues/11730
-- Make sure [grant/revoke won't be applied repeatedly after restarting etcd](https://github.com/etcd-io/etcd/pull/11935).
-
-### Package `wal`
-
-- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
-- Handle [out-of-range slice bound in `ReadAll` and entry limit in `decodeRecord`](https://github.com/etcd-io/etcd/pull/11793).
-
-### etcdctl v3
-
-- Fix `etcdctl member add` command to prevent potential timeout. ([PR#11194](https://github.com/etcd-io/etcd/pull/11194) and [PR#11638](https://github.com/etcd-io/etcd/pull/11638))
-- Add [`etcdctl watch --progress-notify`](https://github.com/etcd-io/etcd/pull/11462) flag.
-- Add [`etcdctl auth status`](https://github.com/etcd-io/etcd/pull/11536) command to check if authentication is enabled
-- Add [`etcdctl get --count-only`](https://github.com/etcd-io/etcd/pull/11743) flag for output type `fields`.
-- Add [`etcdctl member list -w=json --hex`](https://github.com/etcd-io/etcd/pull/11812) flag to print memberListResponse in hex format json.
-- Changed [`etcdctl lock exec-command`](https://github.com/etcd-io/etcd/pull/12829) to return exit code of exec-command.
-- [New tool: `etcdutl`](https://github.com/etcd-io/etcd/pull/12971) incorporated functionality of: `etcdctl snapshot status|restore`, `etcdctl backup`, `etcdctl defrag --data-dir ...`.
-- [ETCDCTL_API=3 `etcdctl migrate`](https://github.com/etcd-io/etcd/pull/12971) has been decommissioned. Use etcd <=v3.4 to restore v2 storage.
-
-### gRPC gateway
-
-- [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) only supports [`/v3`](TODO) endpoint.
- - Deprecated [`/v3beta`](https://github.com/etcd-io/etcd/pull/9298).
- - `curl -L http://localhost:2379/v3beta/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` doesn't work in v3.5. Use `curl -L http://localhost:2379/v3/kv/put -X POST -d '{"key": "Zm9v", "value": "YmFy"}'` instead.
-- Set [`enable-grpc-gateway`](https://github.com/etcd-io/etcd/pull/12297) flag to true when using a config file to keep the defaults the same as the command line configuration.
-
-### gRPC Proxy
-
-- Fix [`panic on error`](https://github.com/etcd-io/etcd/pull/11694) for metrics handler.
-- Add [gRPC keepalive related flags](https://github.com/etcd-io/etcd/pull/11711) `grpc-keepalive-min-time`, `grpc-keepalive-interval` and `grpc-keepalive-timeout`.
-- [Fix grpc watch proxy hangs when failed to cancel a watcher](https://github.com/etcd-io/etcd/pull/12030) .
-- Add [metrics handler for grpcproxy self](https://github.com/etcd-io/etcd/pull/12107).
-- Add [health handler for grpcproxy self](https://github.com/etcd-io/etcd/pull/12114).
-
-### Auth
-
-- Fix [NoPassword check when adding user through GRPC gateway](https://github.com/etcd-io/etcd/pull/11418) ([issue#11414](https://github.com/etcd-io/etcd/issues/11414))
-- Fix bug where [some auth related messages are logged at wrong level](https://github.com/etcd-io/etcd/pull/11586)
-- [Fix a data corruption bug by saving consistent index](https://github.com/etcd-io/etcd/pull/11652).
-- [Improve checkPassword performance](https://github.com/etcd-io/etcd/pull/11735).
-- [Add authRevision field in AuthStatus](https://github.com/etcd-io/etcd/pull/11659).
-- Fix [a bug of not refreshing expired tokens](https://github.com/etcd-io/etcd/pull/13308).
-
-### API
-
-- Add [`/v3/auth/status`](https://github.com/etcd-io/etcd/pull/11536) endpoint to check if authentication is enabled
-- [Add `Linearizable` field to `etcdserverpb.MemberListRequest`](https://github.com/etcd-io/etcd/pull/11639).
-- [Learner support Snapshot RPC](https://github.com/etcd-io/etcd/pull/12890/).
-
-### Package `netutil`
-
-- Remove [`netutil.DropPort/RecoverPort/SetLatency/RemoveLatency`](https://github.com/etcd-io/etcd/pull/12491).
- - These are not used anymore. They were only used for older versions of functional testing.
- - Removed to adhere to best security practices, minimize arbitrary shell invocation.
-
-### `tools/etcd-dump-metrics`
-
-- Implement [input validation to prevent arbitrary shell invocation](https://github.com/etcd-io/etcd/pull/12491).
-
-### Dependency
-
-- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.23.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.23.0) to [**`v1.37.0`**](https://github.com/grpc/grpc-go/releases/tag/v1.37.0).
-- Upgrade [`go.uber.org/zap`](https://github.com/uber-go/zap/releases) from [**`v1.14.1`**](https://github.com/uber-go/zap/releases/tag/v1.14.1) to [**`v1.16.0`**](https://github.com/uber-go/zap/releases/tag/v1.16.0).
-
-### Platforms
-
-- etcd now [officially supports `arm64`](https://github.com/etcd-io/etcd/pull/12929).
- - See https://github.com/etcd-io/etcd/pull/12928 for adding automated tests with `arm64` EC2 instances (Graviton 2).
- - See https://github.com/etcd-io/website/pull/273 for new platform support tier policies.
-
-### Release
-
-- Add s390x build support ([PR#11548](https://github.com/etcd-io/etcd/pull/11548) and [PR#11358](https://github.com/etcd-io/etcd/pull/11358))
-
-### Go
-
-- Require [*Go 1.16+*](https://github.com/etcd-io/etcd/pull/11110).
-- Compile with [*Go 1.16+*](https://golang.org/doc/devel/release.html#go1.16)
-- etcd uses [go modules](https://github.com/etcd-io/etcd/pull/12279) (instead of vendor dir) to track dependencies.
-
-### Project Governance
-
-- The etcd team has added a well-defined and openly discussed project [governance](https://github.com/etcd-io/etcd/pull/11175).
-
-
-
-
diff --git a/CHANGELOG/CHANGELOG-3.6.md b/CHANGELOG/CHANGELOG-3.6.md
deleted file mode 100644
index 0d8924bf245..00000000000
--- a/CHANGELOG/CHANGELOG-3.6.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.5](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.5.md).
-
-
-
-## v3.6.0 (TBD)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.6.0).
-
-### Breaking Changes
-
-- `etcd` will no longer start on a data dir created by newer versions (for example etcd v3.6 will not run on a v3.7+ data dir). To downgrade the data dir, please check out the `etcdutl migrate` command.
-- `etcd` doesn't support serving client requests on the peer listen endpoints (--listen-peer-urls). See [pull/13565](https://github.com/etcd-io/etcd/pull/13565).
-- `etcdctl` will sleep for 2s when performing a range delete without the `--range` flag. See [pull/13747](https://github.com/etcd-io/etcd/pull/13747).
-- Applications which depend on etcd v3.6 packages must be built with go version >= v1.18.
-
-### Deprecations
-
-- Deprecated [V2 discovery](https://etcd.io/docs/v3.5/dev-internal/discovery_protocol/).
-- Deprecated [SetKeepAlive and SetKeepAlivePeriod in limitListenerConn](https://github.com/etcd-io/etcd/pull/14356).
-- Removed [etcdctl defrag --data-dir](https://github.com/etcd-io/etcd/pull/13793).
-- Removed [etcdctl snapshot status](https://github.com/etcd-io/etcd/pull/13809).
-- Removed [etcdctl snapshot restore](https://github.com/etcd-io/etcd/pull/13809).
-- Removed [etcdutl snapshot save](https://github.com/etcd-io/etcd/pull/13809).
-
-
-### etcdctl v3
-
-- Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13133).
-- When printing endpoint status, [show db size in use](https://github.com/etcd-io/etcd/pull/13639).
-- [Always print the raft_term in decimal](https://github.com/etcd-io/etcd/pull/13711) when displaying member list in json.
-- [Add one more field `storageVersion`](https://github.com/etcd-io/etcd/pull/13773) into the response of command `etcdctl endpoint status`.
-- Add [`--max-txn-ops`](https://github.com/etcd-io/etcd/pull/14340) flag to make-mirror command.
-- Display [field `hash_revision`](https://github.com/etcd-io/etcd/pull/14812) for `etcdctl endpoint hash` command.
-
-### etcdutl v3
-
-- Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13142).
-- Add `migrate` command for downgrading/upgrading etcd data dir files.
-
-### Package `server`
-
-- Package `mvcc` was moved to `storage/mvcc`
-- Package `mvcc/backend` was moved to `storage/backend`
-- Package `mvcc/buckets` was moved to `storage/schema`
-- Package `wal` was moved to `storage/wal`
-- Package `datadir` was moved to `storage/datadir`
-
-### Package `raft`
-- Send empty `MsgApp` when entry in-flight limits are exceeded. See [pull/14633](https://github.com/etcd-io/etcd/pull/14633).
-- Add [MaxInflightBytes](https://github.com/etcd-io/etcd/pull/14624) setting in `raft.Config` for better flow control of entries; see the sketch after this list.
-- [Decouple raft from etcd](https://github.com/etcd-io/etcd/issues/14713). Migrated raft to a separate [repository](https://github.com/etcd-io/raft), and renamed raft module to `go.etcd.io/raft/v3`.
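-
-A minimal sketch, assuming the relocated `go.etcd.io/raft/v3` module, of the new `MaxInflightBytes` knob next to the existing flow-control settings; all numeric values are illustrative only:
-
-```go
-package app
-
-import "go.etcd.io/raft/v3"
-
-func newRaftConfig(id uint64, storage raft.Storage) *raft.Config {
-    return &raft.Config{
-        ID:               id,
-        ElectionTick:     10,
-        HeartbeatTick:    1,
-        Storage:          storage,
-        MaxSizePerMsg:    1 << 20,  // at most ~1 MiB of entries per append message
-        MaxInflightMsgs:  256,      // cap on the number of in-flight append messages
-        MaxInflightBytes: 64 << 20, // new: cap on total bytes of in-flight entries
-    }
-}
-```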
-
-### etcd server
-
-- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format.
-- Add [`etcd --experimental-max-learners`](https://github.com/etcd-io/etcd/pull/13377) flag to allow configuration of learner max membership.
-- Add [`etcd --experimental-enable-lease-checkpoint-persist`](https://github.com/etcd-io/etcd/pull/13508) flag to handle upgrade from v3.5.2 clusters with this feature enabled.
-- Add [`etcdctl make-mirror --rev`](https://github.com/etcd-io/etcd/pull/13519) flag to support incremental mirror.
-- Add [`etcd --experimental-wait-cluster-ready-timeout`](https://github.com/etcd-io/etcd/pull/13525) flag to wait for cluster to be ready before serving client requests.
-- Add [v3 discovery](https://github.com/etcd-io/etcd/pull/13635) to bootstrap a new etcd cluster.
-- Add [field `storage`](https://github.com/etcd-io/etcd/pull/13772) into the response body of endpoint `/version`.
-- Add [`etcd --max-concurrent-streams`](https://github.com/etcd-io/etcd/pull/14169) flag to configure the max concurrent streams each client can open at a time, and defaults to math.MaxUint32.
-- Add [`etcd grpc-proxy --experimental-enable-grpc-logging`](https://github.com/etcd-io/etcd/pull/14266) flag to log all grpc requests and responses.
-- Add [`etcd --experimental-compact-hash-check-enabled --experimental-compact-hash-check-time`](https://github.com/etcd-io/etcd/issues/14039) flags to support enabling reliable corruption detection on compacted revisions.
-- Add [Protection on maintenance request when auth is enabled](https://github.com/etcd-io/etcd/pull/14663).
-- Graduated [`--experimental-warning-unary-request-duration` to `--warning-unary-request-duration`](https://github.com/etcd-io/etcd/pull/14414). Note the experimental flag is deprecated and will be decommissioned in v3.7.
-- Add [field `hash_revision` into `HashKVResponse`](https://github.com/etcd-io/etcd/pull/14537).
-- Add [`etcd --experimental-snapshot-catch-up-entries`](https://github.com/etcd-io/etcd/pull/15033) flag to configure the number of entries for a slow follower to catch up after compacting the raft storage entries; defaults to 5k.
-
-### etcd grpc-proxy
-
-- Add [`etcd grpc-proxy start --endpoints-auto-sync-interval`](https://github.com/etcd-io/etcd/pull/14354) flag to enable and configure interval of auto sync of endpoints with server.
-- Add [`etcd grpc-proxy start --listen-cipher-suites`](https://github.com/etcd-io/etcd/pull/14308) flag to support adding configurable cipher list.
-
-### tools/benchmark
-
-- [Add etcd client autoSync flag](https://github.com/etcd-io/etcd/pull/13416)
-
-### Metrics, Monitoring
-
-See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
-
-- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13371).
-- Add [`etcd_debugging_server_alarms`](https://github.com/etcd-io/etcd/pull/14276).
-
-### Go
-- Require [Go 1.19+](https://github.com/etcd-io/etcd/pull/14463).
-- Compile with [Go 1.19+](https://golang.org/doc/devel/release.html#go1.19). Please refer to [gc-guide](https://go.dev/doc/gc-guide) to configure `GOGC` and `GOMEMLIMIT` properly.
-
-### Other
-
-- Use Distroless as base image to make the image less vulnerable and reduce image size.
-
-
diff --git a/CHANGELOG/CHANGELOG-4.0.md b/CHANGELOG/CHANGELOG-4.0.md
deleted file mode 100644
index 860e5efd072..00000000000
--- a/CHANGELOG/CHANGELOG-4.0.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-
-Previous change logs can be found at [CHANGELOG-3.x](https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.x.md).
-
-
-
-## v4.0.0 (TBD)
-
-See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v4.0.0) and [v4.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_4_0/) for any breaking changes.
-
-**Again, before running upgrades from any previous release, please make sure to read change logs below and [v4.0 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_4_0/).**
-
-### Breaking Changes
-
-- [Secure etcd by default](https://github.com/etcd-io/etcd/issues/9475)?
-- Deprecate [`etcd --proxy*`](TODO) flags; **no more v2 proxy**.
-- Deprecate [v2 storage backend](https://github.com/etcd-io/etcd/issues/9232); **no more v2 store**.
- - v2 API is still supported via [v2 emulation](TODO).
-- Deprecate [`etcdctl backup`](TODO) command.
-- `clientv3.Client.KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)` is now [`clientv4.Client.KeepAlive(ctx context.Context, id LeaseID) <-chan *LeaseKeepAliveResponse`](TODO).
- - Similar to `Watch`, [`KeepAlive` does not return errors](https://github.com/etcd-io/etcd/issues/7488).
- - If there's an unknown server error, kill all open channels and create a new stream on the next `KeepAlive` call.
-- Rename `github.com/coreos/client` to `github.com/coreos/clientv2`.
-- [`etcd --experimental-initial-corrupt-check`](TODO) has been deprecated.
- - Use [`etcd --initial-corrupt-check`](TODO) instead.
-- [`etcd --experimental-corrupt-check-time`](TODO) has been deprecated.
- - Use [`etcd --corrupt-check-time`](TODO) instead.
-- Enable TLS 1.3, deprecate TLS cipher suites.
-
-### etcd server
-
-- [`etcd --initial-corrupt-check`](TODO) flag is now stable (`etcd --experimental-initial-corrupt-check` has been deprecated).
- - `etcd --initial-corrupt-check=true` by default, to check cluster database hashes before serving client/peer traffic.
-- [`etcd --corrupt-check-time`](TODO) flag is now stable (`etcd --experimental-corrupt-check-time` has been deprecated).
- - `etcd --corrupt-check-time=12h` by default, to check cluster database hashes every 12 hours.
-- Enable TLS 1.3, deprecate TLS cipher suites.
-
-### Go
-
-- Require [*Go 2*](https://blog.golang.org/go2draft).
-
-
-
-
diff --git a/CHANGELOG/README.md b/CHANGELOG/README.md
deleted file mode 100644
index 8f31bc34329..00000000000
--- a/CHANGELOG/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Change logs
-
-## Production recommendation
-
-The minimum recommended etcd versions to run in **production** are v3.4.8+ and v3.5.4+. Refer to the [versioning policy](https://etcd.io/docs/v3.5/op-guide/versioning/) for more details.
-
-### v3.5 data corruption issue
-
-Running etcd v3.5.2, v3.5.1 and v3.5.0 under high load can cause a data corruption issue.
-If the etcd process is killed, occasionally some committed transactions are not reflected on all members.
-The recommendation is to upgrade to v3.5.4+.
-
-If you have encountered data corruption, please follow instructions on https://etcd.io/docs/v3.5/op-guide/data_corruption/.
-
-## Change log rules
-1. Each patch release only includes changes against previous patch release.
-For example, the change log of v3.5.5 should only include items which are new to v3.5.4.
-2. For the first release (e.g. 3.4.0, 3.5.0, 3.6.0, 4.0.0 etc.) for each minor or major
-version, it only includes changes which are new to the first release of previous minor
-or major version. For example, v3.5.0 should only include items which are new to v3.4.0,
-and v3.6.0 should only include items which are new to v3.5.0.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index 05cb8f8ed87..00000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,125 +0,0 @@
-# How to contribute
-
-etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
-This document outlines the basics of contributing to etcd.
-
-This is a rough outline of what a contributor's workflow looks like:
-* [Find something to work on](#Find-something-to-work-on)
-* [Setup development environment](#Setup-development-environment)
-* [Implement your change](#Implement-your-change)
-* [Commit your change](#Commit-your-change)
-* [Create a pull request](#Create-a-pull-request)
-* [Get your pull request reviewed](#Get-your-pull-request-reviewed)
-
-If you have any questions, please reach out using one of the methods listed in [contact].
-
-[contact]: ./README.md#Contact
-
-## Learn more about etcd
-
-Before making a change please look through resources below to learn more about etcd and tools used for development.
-
-* Please learn about [Git](https://github.com/git-guides), the version control system used in etcd.
-* Read the [etcd learning resources](https://etcd.io/docs/v3.5/learning/)
-* Read the [etcd contributing guides](https://github.com/etcd-io/etcd/tree/main/Documentation/contributor-guide)
-* Watch [etcd deep dive](https://www.youtube.com/watch?v=D2pm6ufIt98&t=927s)
-* Watch [etcd code walk through](https://www.youtube.com/watch?v=H3XaSF6wF7w)
-
-## Find something to work on
-
-All the work in etcd project is tracked in [github issue tracker].
-Issues should be properly labeled, making it easy for you to find something to work on.
-
-Depending on your interest and experience you should check different labels:
-* If you are just starting, check issues labeled with [good first issue].
-* When you feel more comfortable with your contributions, check out [help wanted].
-* Advanced contributors can try to help with issues labeled [priority/important] covering most relevant work at the time.
-
-If none of the aforementioned labels have unassigned issues, please [contact] one of the [maintainers] and ask them to triage more issues.
-
-[github issue tracker]: https://github.com/etcd-io/etcd/issues
-[good first issue]: https://github.com/etcd-io/etcd/labels/good%20first%20issue
-[help wanted]: https://github.com/etcd-io/etcd/labels/help%20wanted
-[maintainers]: https://github.com/etcd-io/etcd/blob/main/MAINTAINERS
-[priority/important]: https://github.com/etcd-io/etcd/labels/priority%2Fimportant
-
-## Setup development environment
-
-The only supported development environment for etcd is linux-amd64.
-Bug reports for any non-supported environments will be ignored.
-Supporting new environments requires introduction of proper tests and maintainer support that are currently lacking in the etcd project.
-If you want to help etcd support your preferred environment, please [file an issue].
-
-Setup environment:
-- [Clone the repository](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository)
-- Install Go by following [installation](https://go.dev/doc/install). Please check minimal go version in [go.mod file](./go.mod#L3).
-- Install build tools (make):
- - For Ubuntu and Debian, run `sudo apt-get install build-essential`
-- Verify that everything is installed by running `make build`
-
-Note: `make build` runs with `-v`. Other build flags can be added through the env var `GO_BUILD_FLAGS`, **if required**. E.g.,
-```console
-GO_BUILD_FLAGS="-buildmode=pie" make build
-```
-
-[file an issue]: https://github.com/etcd-io/etcd/issues/new/choose
-
-## Implement your change
-
-etcd code should follow coding style suggested by the Golang community.
-See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details.
-
-Please ensure that your change passes static analysis (requires [golangci-lint](https://golangci-lint.run/usage/install/)):
-- `make verify` to verify if all checks pass.
-- `make verify-*` to verify a single check, for example `make verify-bom` to verify that the bill-of-materials.json file is up-to-date.
-- `make fix` to fix all checks.
-- `make fix-*` to fix a single check, for example `make fix-bom` to update bill-of-materials.json.
-
-Please ensure that your change passes tests.
-- `make test-unit` to run unit tests.
-- `make test-integration` to run integration tests.
-- `make test-e2e` to run e2e tests.
-
-All changes are expected to come with unit tests.
-All new features are expected to have either e2e or integration tests.
-
-## Commit your change
-
-etcd follows a rough convention for commit messages:
-* First line:
- * Should start with the name of the package (for example `etcdserver`, `etcdctl`) followed by a `:` character.
- * Describe the `what` behind the change
-* Optionally, the author may provide the `why` behind the change in the main commit message body.
-* Last line should be `Signed-off-by: firstname lastname ` (can be automatically generated by providing `--signoff` to the git commit command).
-
-Example of commit message:
-```
-etcdserver: add grpc interceptor to log info on incoming requests
-
-To improve debuggability of etcd v3. Added a grpc interceptor to log
-info on incoming requests to etcd server. The log output includes
-remote client info, request content (with value field redacted), request
-handling latency, response size, etc. Uses zap logger if available,
-otherwise uses capnslog.
-
-Signed-off-by: FirstName LastName
-```
-
-## Create a pull request
-
-Please follow [making a pull request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects#making-a-pull-request) guide.
-
-If you are still working on the pull request, you can convert it to a draft by clicking the `Convert to draft` link just below the list of reviewers.
-
-Multiple small PRs are preferred over a single large one (>500 lines of code).
-
-## Get your pull request reviewed
-
-Before requesting a review, please ensure that all GitHub checks were successful.
-It might happen that some unrelated tests on your PR fail due to flakiness.
-In such cases, please [file an issue] to deflake the problematic test and ask one of the [maintainers] to rerun the tests.
-
-If all checks were successful, feel free to request a review from the people who were involved in the original discussion or from the [maintainers].
-Depending on the complexity of the PR, it might require one or two maintainers to approve your change before merging.
-
-Thanks for contributing!
diff --git a/DCO b/DCO
deleted file mode 100644
index 716561d5d28..00000000000
--- a/DCO
+++ /dev/null
@@ -1,36 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/Dockerfile-release.amd64 b/Dockerfile-release.amd64
deleted file mode 100644
index 4f2fcbed349..00000000000
--- a/Dockerfile-release.amd64
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM --platform=linux/amd64 gcr.io/distroless/static-debian11
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-
-WORKDIR /var/etcd/
-WORKDIR /var/lib/etcd/
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Dockerfile-release.arm64 b/Dockerfile-release.arm64
deleted file mode 100644
index c93763f661b..00000000000
--- a/Dockerfile-release.arm64
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM --platform=linux/arm64 gcr.io/distroless/static-debian11
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-
-WORKDIR /var/etcd/
-WORKDIR /var/lib/etcd/
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Dockerfile-release.ppc64le b/Dockerfile-release.ppc64le
deleted file mode 100644
index 268e397410c..00000000000
--- a/Dockerfile-release.ppc64le
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM --platform=linux/ppc64le gcr.io/distroless/static-debian11
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-
-WORKDIR /var/etcd/
-WORKDIR /var/lib/etcd/
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Dockerfile-release.s390x b/Dockerfile-release.s390x
deleted file mode 100644
index 4a280551deb..00000000000
--- a/Dockerfile-release.s390x
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM --platform=linux/s390x gcr.io/distroless/static-debian11
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD etcdutl /usr/local/bin/
-
-WORKDIR /var/etcd/
-WORKDIR /var/lib/etcd/
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/Documentation/README.md b/Documentation/README.md
deleted file mode 100644
index 5c1262f8e2e..00000000000
--- a/Documentation/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-This directory includes etcd project internal documentation for new and existing contributors.
-
-For user and developer documentation, please go to [etcd.io](https://etcd.io/),
-which is developed in the [website](https://github.com/etcd-io/website/) repo.
diff --git a/Documentation/contributor-guide/branch_management.md b/Documentation/contributor-guide/branch_management.md
deleted file mode 100644
index 838a08c7213..00000000000
--- a/Documentation/contributor-guide/branch_management.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Branch management
-
-## Guide
-
-* New development occurs on the [main branch][main].
-* Main branch should always have a green build!
-* Backwards-compatible bug fixes should target the main branch and subsequently be ported to stable branches.
-* Once the main branch is ready for release, it will be tagged and become the new stable branch.
-
-The etcd team has adopted a *rolling release model* and supports two stable versions of etcd.
-
-### Main branch
-
-The `main` branch is our development branch. All new features land here first.
-
-To try new and experimental features, pull `main` and play with it. Note that `main` may not be stable because new features may introduce bugs.
-
-Before the release of the next stable version, feature PRs will be frozen. A [release manager](./release.md/#release-management) will be assigned to the major/minor version and will lead the etcd community through testing, bug fixing, and documenting the release for one to two weeks.
-
-### Stable branches
-
-All branches with prefix `release-` are considered _stable_ branches.
-
-After every minor release ([semver.org](https://semver.org/)), we will have a new stable branch for that release, managed by a [patch release manager](./release.md/#release-management). We will keep fixing backwards-compatible bugs for the latest two stable releases. A _patch_ release to each supported release branch, incorporating any bug fixes, will be made roughly once every two weeks, provided there are any patches.
-
-[main]: https://github.com/etcd-io/etcd/tree/main
-
diff --git a/Documentation/contributor-guide/features.md b/Documentation/contributor-guide/features.md
deleted file mode 100644
index a88ac9a2548..00000000000
--- a/Documentation/contributor-guide/features.md
+++ /dev/null
@@ -1,83 +0,0 @@
-# Features
-
-This document provides an overview of etcd features and general development guidelines for adding and deprecating them. The project maintainers can override these guidelines per the need of the project following the project governance.
-
-## Overview
-
-The etcd features fall into three stages: experimental, stable, and unsafe.
-
-### Experimental
-
-Any new feature is usually added as an experimental feature. An experimental feature is characterized as below:
-- Might be buggy due to a lack of user testing. Enabling the feature may not work as expected.
-- Disabled by default when added initially.
-- Support for such a feature may be dropped at any time without notice
- - Feature related issues may be given lower priorities.
- - It can be removed in the next minor or major release without following the feature deprecation policy unless it graduates to the stable stage.
-
-### Stable
-
-A stable feature is characterized as below:
-- Supported as part of the supported releases of etcd.
-- May be enabled by default.
-- Discontinuation of support must follow the feature deprecation policy.
-
-### Unsafe
-
-Unsafe features are rare and listed under the `Unsafe feature:` section in the etcd usage documentation. By default, they are disabled. They should be used with caution following documentation. An unsafe feature can be removed in the next minor or major release without following feature deprecation policy.
-
-## Development Guidelines
-
-### Adding a new feature
-
-Any new enhancements to etcd are typically added as experimental features. The general development requirements are listed below. They can be somewhat flexible depending on the scope of the feature and review discussions, and will evolve over time.
-- Open an issue
- - It must provide a clear need for the proposed feature.
- - It should list development work items as checkboxes. There must be one work item towards future graduation to the stable stage.
- - Label the issue with `type/feature` and `experimental`.
- - Keep the issue open for tracking purpose until a decision is made on graduation.
-- Open a Pull Request (PR)
- - Provide unit tests. Integration tests are also recommended where possible.
- - Provide robust e2e test coverage. If the feature being added is complicated or quickly needed, maintainers can decide to go with e2e tests for basic coverage initially and have robust coverage added at a later time, before the feature graduates to stable.
- - Provide logs for proper debugging.
- - Provide metrics and benchmarks as needed.
- - The Feature should be disabled by default.
- - Any configuration flags related to the implementation of the feature must be prefixed with `experimental` e.g. `--experimental-feature-name`.
- - Add a CHANGELOG entry.
-- At least two maintainers must approve feature requirements and related code changes.
-
-### Graduating an Experimental feature to Stable
-
-It is important that experimental features don't get stuck in that stage. They should be revisited and moved to the stable stage following the graduation steps as described here.
-
-#### Locate graduation candidate
-Decide if an experimental feature is ready for graduation to the stable stage.
-- Find the issue that was used to enable the experimental feature initially. One way to find such issues is to search for issues with `type/feature` and `experimental` labels.
-- Fix any known open issues against the feature.
-- Make sure the feature was enabled for at least one previous release. Check the PR(s) reference from the issue to see when the feature related code changes were merged.
-
-#### Provide implementation
-If an experimental feature is found ready for graduation to the stable stage, open a Pull Request (PR) with the following changes.
-- Add robust e2e tests if not already provided.
-- Add a new stable feature flag identical to the experimental feature flag but without the `--experimental` prefix.
-- Deprecate the experimental feature following the [feature deprecation policy](#Deprecating-a-feature).
- - Implementation must ensure that both the graduated and the deprecated experimental feature flags work as expected. Note that both these flags will co-exist for the timeframe described in the feature deprecation policy (an illustrative sketch follows below).
-- Enable the graduated feature by default if needed.
-
-At least two maintainers must approve the work. Patch releases should not be considered for graduation.
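-
-For illustration only, below is a minimal sketch of how a graduated flag and its deprecated
-experimental counterpart might co-exist during the deprecation window. The flag names
-(`--widget-cache` / `--experimental-widget-cache`) and the plain `flag`-package wiring are
-hypothetical and are not etcd's actual flag-handling code.
-
-```go
-package main
-
-import (
-    "flag"
-    "fmt"
-)
-
-func main() {
-    fs := flag.NewFlagSet("etcd", flag.ExitOnError)
-
-    // Graduated (stable) flag: same name as the experimental one, minus the prefix.
-    widgetCache := fs.Bool("widget-cache", false, "Enable the widget cache.")
-
-    // Deprecated experimental flag, kept for the deprecation window; its help text
-    // points users at the stable flag.
-    experimentalWidgetCache := fs.Bool("experimental-widget-cache", false,
-        "DEPRECATED. Use --widget-cache instead.")
-
-    // Either spelling must keep working while both flags co-exist.
-    fs.Parse([]string{"--experimental-widget-cache=true"})
-    fmt.Println("widget cache enabled:", *widgetCache || *experimentalWidgetCache)
-}
-```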
-
-### Deprecating a feature
-
-#### Experimental
-An experimental feature is deprecated when it graduates to the stable stage.
-- Add a deprecation message in the documentation of the experimental feature with a recommendation to use related stable feature. e.g. `DEPRECATED. Use instead.`
-- Add a `deprecated` label in the issue that was initially used to enable the experimental feature.
-
-#### Stable
-As the project evolves, a stable feature may sometimes need to be deprecated and removed. Such a situation should be handled using the steps below:
-- Create an issue for tracking purpose.
-- Add a deprecation message in the feature usage documentation before a planned release for feature deprecation. e.g. `To be deprecated in .`. If a new feature replaces the `To be deprecated` feature, then also provide a message saying so. e.g. `Use instead.`.
-- Deprecate the feature in the planned release with a message as part of the feature usage documentation. e.g. `DEPRECATED`. If a new feature replaces the deprecated feature, then also provide a message saying so. e.g. `DEPRECATED. Use instead.`.
-- Add a `deprecated` label in the related issue.
-
-Remove the deprecated feature in the following release. Close any related issue(s). At least two maintainers must approve the work. Patch releases should not be considered for deprecation.
diff --git a/Documentation/contributor-guide/local_cluster.md b/Documentation/contributor-guide/local_cluster.md
deleted file mode 100644
index 675674eec24..00000000000
--- a/Documentation/contributor-guide/local_cluster.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# Set up local cluster
-
-For testing and development deployments, the quickest and easiest way is to configure a local cluster. For a production deployment, refer to the [clustering][clustering] section.
-
-## Local standalone cluster
-
-### Starting a cluster
-
-Run the following to deploy an etcd cluster as a standalone cluster:
-
-```
-$ ./etcd
-...
-```
-
-If the `etcd` binary is not present in the current working directory, it might be located either at `$GOPATH/bin/etcd` or at `/usr/local/bin/etcd`. Run the command appropriately.
-
-The running etcd member listens on `localhost:2379` for client requests.
-
-### Interacting with the cluster
-
-Use `etcdctl` to interact with the running cluster:
-
-1. Store an example key-value pair in the cluster:
-
- ```
- $ ./etcdctl put foo bar
- OK
- ```
-
- If OK is printed, storing the key-value pair was successful.
-
-2. Retrieve the value of `foo`:
-
- ```
- $ ./etcdctl get foo
- bar
- ```
-
- If `bar` is returned, interaction with the etcd cluster is working as expected.
-
-## Local multi-member cluster
-
-### Starting a cluster
-
-A `Procfile` at the base of the etcd git repository is provided to easily configure a local multi-member cluster. To start a multi-member cluster, navigate to the root of the etcd source tree and perform the following:
-
-1. Install `goreman` to control Procfile-based applications:
-
- ```
- $ go install github.com/mattn/goreman@latest
- ```
- The installation will place executables in `$GOPATH/bin`. If the `$GOPATH` environment variable is not set, the tool will be installed into `$HOME/go/bin`. Make sure that `$PATH` is set accordingly in your environment.
-
-2. Start a cluster with `goreman` using etcd's stock Procfile:
-
- ```
- $ goreman -f Procfile start
- ```
-
- The members start running. They listen on `localhost:2379`, `localhost:22379`, and `localhost:32379` respectively for client requests.
-
-### Interacting with the cluster
-
-Use `etcdctl` to interact with the running cluster:
-
-1. Print the list of members:
-
- ```
- $ etcdctl --write-out=table --endpoints=localhost:2379 member list
- ```
- The list of etcd members is displayed as follows:
-
- ```
- +------------------+---------+--------+------------------------+------------------------+
- | ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS |
- +------------------+---------+--------+------------------------+------------------------+
- | 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:2380 | http://127.0.0.1:2379 |
- | 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 |
- | fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 |
- +------------------+---------+--------+------------------------+------------------------+
- ```
-
-2. Store an example key-value pair in the cluster:
-
- ```
- $ etcdctl put foo bar
- OK
- ```
-
- If OK is printed, storing the key-value pair was successful.
-
-### Testing fault tolerance
-
-To exercise etcd's fault tolerance, kill a member and attempt to retrieve the key.
-
-1. Identify the process name of the member to be stopped.
-
- The `Procfile` lists the properties of the multi-member cluster. For example, consider the member with the process name, `etcd2`.
-
-2. Stop the member:
-
- ```
- # kill etcd2
- $ goreman run stop etcd2
- ```
-
-3. Store a key:
-
- ```
- $ etcdctl put key hello
- OK
- ```
-
-4. Retrieve the key that is stored in the previous step:
-
- ```
- $ etcdctl get key
- hello
- ```
-
-5. Retrieve a key from the stopped member:
-
- ```
- $ etcdctl --endpoints=localhost:22379 get key
- ```
-
- The command should display an error caused by connection failure:
-
- ```
- 2017/06/18 23:07:35 grpc: Conn.resetTransport failed to create client transport: connection error: desc = "transport: dial tcp 127.0.0.1:22379: getsockopt: connection refused"; Reconnecting to "localhost:22379"
- Error: grpc: timed out trying to connect
- ```
-6. Restart the stopped member:
-
- ```
- $ goreman run restart etcd2
- ```
-
-7. Get the key from the restarted member:
-
- ```
- $ etcdctl --endpoints=localhost:22379 get key
- hello
- ```
-
- Restarting the member re-establishes the connection. `etcdctl` will now be able to retrieve the key successfully. To learn more about interacting with etcd, read the [interacting with etcd section][interacting].
-
-[clustering]: https://etcd.io/docs/latest/op-guide/clustering/
-[interacting]: https://etcd.io/docs/latest/dev-guide/interacting_v3/
diff --git a/Documentation/contributor-guide/logging.md b/Documentation/contributor-guide/logging.md
deleted file mode 100644
index 9eb9032013e..00000000000
--- a/Documentation/contributor-guide/logging.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Logging Conventions
-
-etcd uses the [zap][zap] library for logging application output categorized into *levels*. A log message's level is determined according to these conventions:
-
-* Debug: Everything is still fine, but even common operations may be logged, producing a higher volume of less helpful notices. Usually not used in production.
- * Examples:
- * Send a normal message to a remote peer
- * Write a log entry to disk
-
-* Info: Normal, working log information; everything is fine, with helpful notices for auditing or common operations. Should not be logged more frequently than once every few seconds during normal server operation.
- * Examples:
- * Startup configuration
- * Start to do snapshot
-
-* Warning: (Hopefully) temporary conditions that may cause errors but may still work fine. A replica disappearing (which may reconnect) is a warning.
- * Examples:
- * Failure to send raft message to a remote peer
- * Failure to receive heartbeat message within the configured election timeout
-
-* Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
- * Examples:
- * Failure to allocate disk space for WAL
-
-* Panic: Unrecoverable or unexpected error situation that requires stopping execution.
- * Examples:
- * Failure to create the database
-
-* Fatal: Unrecoverable or unexpected error situation that requires immediate exit. Mostly used in tests.
- * Examples:
- * Failure to find the data directory
- * Failure to run a test function
-
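-As a rough, illustrative sketch only (not code taken from the etcd server), these conventions map
-onto zap calls roughly as follows; the messages and field names are made up, and note that zap's
-default production configuration emits Info and above:
-
-```go
-package main
-
-import (
-    "errors"
-
-    "go.uber.org/zap"
-)
-
-var errNoSpace = errors.New("no space left on device")
-
-func main() {
-    // etcd wires up its own zap configuration; a default logger is enough for this sketch.
-    lg, _ := zap.NewProduction()
-    defer lg.Sync()
-
-    // Debug: routine, high-volume events, e.g. sending a normal message to a remote peer.
-    lg.Debug("sent message to peer", zap.String("to", "8211f1d0f64f3269"))
-
-    // Info: normal but auditable events, e.g. starting a snapshot.
-    lg.Info("starting snapshot", zap.Uint64("at-revision", 12345))
-
-    // Warning: temporary conditions that may recover, e.g. a missed heartbeat.
-    lg.Warn("failed to receive heartbeat within election timeout", zap.String("peer", "infra2"))
-
-    // Error: a request failed or data was lost, e.g. WAL allocation failure.
-    lg.Error("failed to allocate disk space for WAL", zap.Error(errNoSpace))
-
-    // Panic and Fatal stop execution, so they are only noted here:
-    // lg.Panic("failed to create the database"); lg.Fatal("failed to find the data directory")
-}
-```
-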
-[zap]: https://github.com/uber-go/zap
diff --git a/Documentation/contributor-guide/modules-future.svg b/Documentation/contributor-guide/modules-future.svg
deleted file mode 100644
index 92d060a29fc..00000000000
--- a/Documentation/contributor-guide/modules-future.svg
+++ /dev/null
@@ -1,604 +0,0 @@
-
-
diff --git a/Documentation/contributor-guide/modules.md b/Documentation/contributor-guide/modules.md
deleted file mode 100644
index a8551aa39eb..00000000000
--- a/Documentation/contributor-guide/modules.md
+++ /dev/null
@@ -1,94 +0,0 @@
-# Golang modules
-
-The etcd project (since version 3.5) is organized into multiple
-[golang modules](https://golang.org/ref/mod) hosted in a [single repository](https://golang.org/ref/mod#vcs-dir).
-
-![modules graph](modules.svg)
-
-There are the following modules:
-
- - **go.etcd.io/etcd/api/v3** - contains API definitions
- (like protos & proto-generated libraries) that defines communication protocol
- between etcd clients and server.
-
- - **go.etcd.io/etcd/pkg/v3** - collection of utility packages used by etcd
- without being specific to etcd itself. A package belongs here
- only if it could possibly be moved out into its own repository in the future.
- Please avoid adding code here that has a lot of dependencies of its own, as
- they automatically become dependencies of the client library
- (which we want to keep lightweight).
-
- - **go.etcd.io/etcd/client/v3** - client library used to contact etcd over
- the network (grpc). Recommended for all new usage of etcd (see the illustrative sketch after this list).
-
- - **go.etcd.io/etcd/client/v2** - legacy client library used to contact etcd
- over HTTP protocol. Deprecated. All new usage should depend on /v3 library.
-
- - **go.etcd.io/raft/v3** - implementation of distributed consensus
- protocol. Should have no etcd specific code. Hosted in a separate repository:
- https://github.com/etcd-io/raft.
-
- - **go.etcd.io/etcd/server/v3** - etcd implementation.
- The code in this package is etcd internal and should not be consumed
- by external projects. The package layout and API can change within the minor versions.
-
- - **go.etcd.io/etcd/etcdctl/v3** - a command line tool to access and manage etcd.
-
- - **go.etcd.io/etcd/tests/v3** - a module that contains all integration tests of etcd.
- Notice: All unit-tests (fast and not requiring cross-module dependencies)
- should be kept in the modules local to the code under test.
-
- - **go.etcd.io/bbolt** - implementation of persistent b-tree.
- Hosted in a separate repository: https://github.com/etcd-io/bbolt.
-
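-As a minimal, illustrative sketch (the endpoint, key, and value are placeholders, and error
-handling is kept short), a program that depends only on the recommended
-`go.etcd.io/etcd/client/v3` module might look like this:
-
-```go
-package main
-
-import (
-    "context"
-    "fmt"
-    "log"
-    "time"
-
-    clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-func main() {
-    // Connect to a local etcd member over gRPC.
-    cli, err := clientv3.New(clientv3.Config{
-        Endpoints:   []string{"localhost:2379"},
-        DialTimeout: 5 * time.Second,
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer cli.Close()
-
-    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-    defer cancel()
-
-    // Put and read back a key through the v3 API.
-    if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
-        log.Fatal(err)
-    }
-    resp, err := cli.Get(ctx, "foo")
-    if err != nil {
-        log.Fatal(err)
-    }
-    for _, kv := range resp.Kvs {
-        fmt.Printf("%s=%s\n", kv.Key, kv.Value)
-    }
-}
-```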
-
-### Operations
-
-1. All etcd modules should be released in the same versions, e.g.
- `go.etcd.io/etcd/client/v3@v3.5.10` must depend on `go.etcd.io/etcd/api/v3@v3.5.10`.
-
- The consistent updating of versions can be performed using:
- ```shell script
- % DRY_RUN=false TARGET_VERSION="v3.5.10" ./scripts/release_mod.sh update_versions
- ```
-2. The released modules should be tagged according to https://golang.org/ref/mod#vcs-version rules,
- i.e. each module should get its own tag.
- The tagging can be performed using:
- ```shell script
- % DRY_RUN=false REMOTE_REPO="origin" ./scripts/release_mod.sh push_mod_tags
- ```
-
-3. All etcd modules should depend on the same versions of underlying dependencies.
- This can be verified using:
- ```shell script
- % PASSES="dep" ./test.sh
- ```
-
-4. The go.mod files must not contain unused dependencies and must
- conform to the `go mod tidy` format.
- This is being verified by:
- ```
- % PASSES="mod_tidy" ./test.sh
- ```
-
-5. To trigger actions across all modules (e.g. auto-format all files), please
- use/expand the following script:
- ```shell script
- % ./scripts/fix.sh
- ```
-
-### Future
-
-As a North Star, we would like to evolve the etcd modules towards the following model:
-
-![modules graph](modules-future.svg)
-
-This assumes:
- - Splitting etcdmigrate/etcdadm out of the etcdctl binary.
- Thanks to this, etcdctl would clearly become a command-line wrapper
- around the network client API,
- while etcdmigrate/etcdadm would support direct physical operations on the
- etcd storage files.
- - Splitting etcd-proxy out of the ./etcd binary, as it contains more experimental code
- and so carries additional risk & dependencies.
- - Deprecation of support for v2 protocol.
diff --git a/Documentation/contributor-guide/modules.svg b/Documentation/contributor-guide/modules.svg
deleted file mode 100644
index 5a3c3b2c39e..00000000000
--- a/Documentation/contributor-guide/modules.svg
+++ /dev/null
@@ -1,489 +0,0 @@
-
-
diff --git a/Documentation/contributor-guide/release.md b/Documentation/contributor-guide/release.md
deleted file mode 100644
index 44532a2eddb..00000000000
--- a/Documentation/contributor-guide/release.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Release
-
-This guide describes how to release a new version of etcd.
-
-The procedure includes some manual steps for sanity checking, but it can probably be further scripted. Please keep this document up-to-date if making changes to the release process.
-
-## Release management
-
-etcd community members are assigned to manage the release of each etcd major/minor version, as well as patch
-releases to each stable release branch. The managers are responsible for communicating the timelines and status of each
-release and for ensuring the stability of the release branch.
-
-| Releases | Manager |
-|------------------------|-------------------------------------------------------------|
-| 3.4 patch (post 3.4.0) | Benjamin Wang [@ahrtr](https://github.com/ahrtr) |
-| 3.5 patch (post 3.5.0) | Marek Siarkowicz [@serathius](https://github.com/serathius) |
-
-All release version numbers follow the format of [semantic versioning 2.0.0](http://semver.org/).
-
-### Major, minor version release, or its pre-release
-
-- Ensure the relevant milestone on GitHub is complete. All referenced issues should be closed, or moved elsewhere.
-- Ensure the latest upgrade documentation is available.
-- Bump the [hardcoded MinClusterVersion in the repository](https://github.com/etcd-io/etcd/blob/v3.4.15/version/version.go#L29), if necessary.
-- Add feature capability maps for the new version, if necessary.
-
-### Patch version release
-
-- To request a backport, developers submit cherrypick PRs targeting the release branch. The commits should not include merge commits. The commits should be restricted to bug fixes and security patches.
-- The cherrypick PRs should target the appropriate release branch (`base:release--`). `hack/patch/cherrypick.sh` may be used to automatically generate cherrypick PRs.
-- The release patch manager reviews the cherrypick PRs. Please discuss carefully what is backported to the patch release. Each patch release should be strictly better than its predecessor.
-- The release patch manager will cherry-pick these commits, starting from the oldest one, into the stable branch.
-
-## Write release note
-
-- Write an introduction for the new release: for example, what major bugs we fixed, what new features we introduced, or what performance improvements we made.
-- Put `[GH XXXX]` at the head of the change line to reference the Pull Request that introduces the change. Moreover, add a link to it to jump to the Pull Request.
-- Find PRs with the `release-note` label and explain them in the `NEWS` file, as a straightforward summary of changes for end-users.
-
-## Build and push the release artifacts
-
-- Ensure `docker` is available.
-
-Run release script in root directory:
-
-```
-DRY_RUN=false ./scripts/release.sh ${VERSION}
-```
-
-It generates all release binaries and images under directory ./release.
-Binaries are pushed to gcr.io and images are pushed to quay.io and gcr.io.
-
-## Publish release page in GitHub
-
-- Set release title as the version name.
-- Follow the format of previous release pages.
-- Attach the generated binaries and signatures.
-- Select whether it is a pre-release.
-- Publish the release!
-
-## Announce to the etcd-dev Googlegroup
-
-- Follow the format of [previous release emails](https://groups.google.com/forum/#!forum/etcd-dev).
-- Make sure to include a list of authors that contributed since the previous release - something like the following might be handy:
-
-```
-git log ...${PREV_VERSION} --pretty=format:"%an" | sort | uniq | tr '\n' ',' | sed -e 's#,#, #g' -e 's#, $##'
-```
-
-- Send email to etcd-dev@googlegroups.com
-
-## Post release
-
-- Create new stable branch through `git push origin ${VERSION_MAJOR}.${VERSION_MINOR}` if this is a major stable release. This assumes `origin` corresponds to "https://github.com/etcd-io/etcd".
-- Bump [hardcoded Version in the repository](https://github.com/etcd-io/etcd/blob/v3.4.15/version/version.go#L30) to the version `${VERSION}+git`.
diff --git a/Documentation/contributor-guide/reporting_bugs.md b/Documentation/contributor-guide/reporting_bugs.md
deleted file mode 100644
index 6804d369479..00000000000
--- a/Documentation/contributor-guide/reporting_bugs.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Reporting bugs
-
-If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
-
-To make the bug report accurate and easy to understand, please try to create bug reports that are:
-
-- Specific. Include as many details as possible: which version, what environment, what configuration, etc. If the bug is related to running the etcd server, please attach the etcd log (the starting log with etcd configuration is especially important).
-
-- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce; please include the steps that might lead to the problem. If possible, please attach the affected etcd data dir and stack trace to the bug report.
-
-- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. It significantly slows down fixing a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on etcd is out of scope, but we are happy to provide guidance in the right direction or help with using etcd itself.
-
-- Unique. Do not duplicate an existing bug report.
-
-- Scoped. One bug per report. Do not follow up with another bug inside one report.
-
-It may be worthwhile to read [Elika Etemad’s article on filing good bug reports][filing-good-bugs] before creating a bug report.
-
-We might ask for further information to locate a bug. A duplicated bug report will be closed.
-
-## Frequently asked questions
-
-### How to get a stack trace
-
-``` bash
-$ kill -QUIT $PID
-```
-
-### How to get etcd version
-
-``` bash
-$ etcd --version
-```
-
-### How to get etcd configuration and log when it runs as systemd service ‘etcd2.service’
-
-``` bash
-$ sudo systemctl cat etcd2
-$ sudo journalctl -u etcd2
-```
-
-Due to an upstream systemd bug, journald may miss the last few log lines when its processes exit. If journalctl says etcd stopped without fatal or panic message, try `sudo journalctl -f -t etcd2` to get full log.
-
-[etcd-issue]: https://github.com/etcd-io/etcd/issues/new
-[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/
diff --git a/Documentation/contributor-guide/trige_issues.md b/Documentation/contributor-guide/trige_issues.md
deleted file mode 100644
index 91ff796a015..00000000000
--- a/Documentation/contributor-guide/trige_issues.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Issue triage guidelines
-
-## Purpose
-
-Speed up issue management.
-
-The `etcd` issues are listed at https://github.com/etcd-io/etcd/issues
-and are identified with labels. For example, an issue that is identified
-as a bug will eventually be labeled `area/bug`. New issues will
-start out without any labels, but typically `etcd` maintainers and active contributors
-add labels based on their findings. The detailed list of labels can be found at
-https://github.com/kubernetes/kubernetes/labels
-
-The following are a few predetermined searches on issues for convenience:
-* [Bugs](https://github.com/etcd-io/etcd/labels/area%2Fbug)
-* [Help Wanted](https://github.com/etcd-io/etcd/labels/Help%20Wanted)
-* [Longest untriaged issues](https://github.com/etcd-io/etcd/issues?utf8=%E2%9C%93&q=is%3Aopen+sort%3Aupdated-asc+)
-
-## Scope
-
-These guidelines serve as a primary document for triaging incoming issues in
-`etcd`. Everyone is welcome to help manage issues and PRs, but the work and responsibilities discussed in this document are created with `etcd` maintainers and active contributors in mind.
-
-## Validate if an issue is a bug
-
-Validate if the issue is indeed a bug. If not, add a comment with the findings and close a trivial issue. For a non-trivial issue, wait to hear back from the issue reporter and see if there is any objection. If the issue reporter does not reply in 30 days, close the issue. If the problem cannot be reproduced or requires more information, leave a comment for the issue reporter.
-
-## Inactive issues
-
-Issues that lack enough information from the issue reporter should be closed if the issue reporter does not provide the information within 60 days.
-
-## Duplicate issues
-
-If an issue is a duplicate, add a comment stating so, along with a reference to the original issue, and close it.
-
-## Issues that don't belong to etcd
-
-Sometimes issues are reported that actually belong to other projects that `etcd` uses, for example, `grpc` or `golang` issues. Such issues should be addressed by asking the reporter to open an issue in the appropriate other project. Close the issue unless a maintainer and the issue reporter see a need to keep it open for tracking purposes.
-
-## Verify important labels are in place
-
-Make sure that the issue has labels for the areas it belongs to, proper assignees are added, and a milestone is identified. If any of these are missing, add them. If labels cannot be assigned due to limited privileges, or the correct label cannot be decided, that’s fine; contact the maintainers if needed.
-
-## Poke issue owner if needed
-
-If an issue owned by a developer has had no PR created in 30 days, contact the issue owner and ask them for a PR, or to release ownership if needed.
diff --git a/Documentation/contributor-guide/trige_prs.md b/Documentation/contributor-guide/trige_prs.md
deleted file mode 100644
index c2b43ea765f..00000000000
--- a/Documentation/contributor-guide/trige_prs.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# PR management
-
-## Purpose
-
-Speed up PR management.
-
-The `etcd` PRs are listed at https://github.com/etcd-io/etcd/pulls
-A PR can have various labels, a milestone, reviewers, etc. The detailed list of labels can be found at
-https://github.com/kubernetes/kubernetes/labels
-
-The following are a few example searches on PRs for convenience:
-* [Open PRS for milestone etcd-v3.6](https://github.com/etcd-io/etcd/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+milestone%3Aetcd-v3.6)
-* [PRs under investigation](https://github.com/etcd-io/etcd/labels/Investigating)
-
-## Scope
-
-These guidelines serve as a primary document for managing PRs in `etcd`. Everyone is welcome to help manage PRs, but the work and responsibilities discussed in this document are created with `etcd` maintainers and active contributors in mind.
-
-## Handle inactive PRs
-Poke the PR owner if review comments are not addressed in 15 days. If the PR owner does not reply in 90 days, update the PR with a new commit if possible. If not, the inactive PR should be closed after 180 days.
-
-## Poke reviewer if needed
-
-Reviewers are usually responsive in a timely fashion, but considering everyone is busy, give them some time after requesting a review if a quick response is not provided. If a response is not provided in 10 days, feel free to contact them by adding a comment in the PR, sending an email, or messaging them on Slack.
-
-## Verify important labels are in place
-
-Make sure that appropriate reviewers are added to the PR. Also, make sure that a milestone is identified. If any of these or other important labels are missing, add them. If a correct label cannot be decided, leave a comment for the maintainers to do so as needed.
diff --git a/Documentation/dev-guide/apispec/swagger/rpc.swagger.json b/Documentation/dev-guide/apispec/swagger/rpc.swagger.json
deleted file mode 100644
index ca896fb501c..00000000000
--- a/Documentation/dev-guide/apispec/swagger/rpc.swagger.json
+++ /dev/null
@@ -1,3054 +0,0 @@
-{
- "consumes": [
- "application/json"
- ],
- "produces": [
- "application/json"
- ],
- "swagger": "2.0",
- "info": {
- "title": "api/etcdserverpb/rpc.proto",
- "version": "version not set"
- },
- "paths": {
- "/v3/auth/authenticate": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "Authenticate processes an authenticate request.",
- "operationId": "Auth_Authenticate",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthenticateRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthenticateResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/disable": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "AuthDisable disables authentication.",
- "operationId": "Auth_AuthDisable",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthDisableRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthDisableResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/enable": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "AuthEnable enables authentication.",
- "operationId": "Auth_AuthEnable",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthEnableRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthEnableResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/role/add": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleAdd adds a new role. Role name cannot be empty.",
- "operationId": "Auth_RoleAdd",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleAddRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleAddResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/role/delete": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleDelete deletes a specified role.",
- "operationId": "Auth_RoleDelete",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleDeleteRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleDeleteResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/role/get": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleGet gets detailed role information.",
- "operationId": "Auth_RoleGet",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleGetRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleGetResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/role/grant": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleGrantPermission grants a permission of a specified key or range to a specified role.",
- "operationId": "Auth_RoleGrantPermission",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/role/list": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleList gets lists of all roles.",
- "operationId": "Auth_RoleList",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleListRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleListResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/role/revoke": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "RoleRevokePermission revokes a key or range permission of a specified role.",
- "operationId": "Auth_RoleRevokePermission",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleRevokePermissionRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthRoleRevokePermissionResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/status": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "AuthStatus displays authentication status.",
- "operationId": "Auth_AuthStatus",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthStatusRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthStatusResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/user/add": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserAdd adds a new user. User name cannot be empty.",
- "operationId": "Auth_UserAdd",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserAddRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserAddResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/user/changepw": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserChangePassword changes the password of a specified user.",
- "operationId": "Auth_UserChangePassword",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/user/delete": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserDelete deletes a specified user.",
- "operationId": "Auth_UserDelete",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserDeleteRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserDeleteResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/user/get": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserGet gets detailed user information.",
- "operationId": "Auth_UserGet",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserGetRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserGetResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/user/grant": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserGrant grants a role to a specified user.",
- "operationId": "Auth_UserGrantRole",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/user/list": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserList gets a list of all users.",
- "operationId": "Auth_UserList",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserListRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserListResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/auth/user/revoke": {
- "post": {
- "tags": [
- "Auth"
- ],
- "summary": "UserRevokeRole revokes a role of specified user.",
- "operationId": "Auth_UserRevokeRole",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/cluster/member/add": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberAdd adds a member into the cluster.",
- "operationId": "Cluster_MemberAdd",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberAddRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberAddResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/cluster/member/list": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberList lists all the members in the cluster.",
- "operationId": "Cluster_MemberList",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberListRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberListResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/cluster/member/promote": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberPromote promotes a member from raft learner (non-voting) to raft voting member.",
- "operationId": "Cluster_MemberPromote",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberPromoteRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberPromoteResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/cluster/member/remove": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberRemove removes an existing member from the cluster.",
- "operationId": "Cluster_MemberRemove",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberRemoveRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberRemoveResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/cluster/member/update": {
- "post": {
- "tags": [
- "Cluster"
- ],
- "summary": "MemberUpdate updates the member configuration.",
- "operationId": "Cluster_MemberUpdate",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberUpdateRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbMemberUpdateResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/compaction": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Compact compacts the event history in the etcd key-value store. The key-value\nstore should be periodically compacted or the event history will continue to grow\nindefinitely.",
- "operationId": "KV_Compact",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbCompactionRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbCompactionResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/deleterange": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "DeleteRange deletes the given range from the key-value store.\nA delete request increments the revision of the key-value store\nand generates a delete event in the event history for every deleted key.",
- "operationId": "KV_DeleteRange",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbDeleteRangeRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbDeleteRangeResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/lease/leases": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseLeases lists all existing leases.",
- "operationId": "Lease_LeaseLeases2",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseLeasesResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/lease/revoke": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.",
- "operationId": "Lease_LeaseRevoke2",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseRevokeRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/lease/timetolive": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseTimeToLive retrieves lease information.",
- "operationId": "Lease_LeaseTimeToLive2",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/put": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Put puts the given key into the key-value store.\nA put request increments the revision of the key-value store\nand generates one event in the event history.",
- "operationId": "KV_Put",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbPutRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbPutResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/range": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Range gets the keys in the range from the key-value store.",
- "operationId": "KV_Range",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbRangeRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbRangeResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/kv/txn": {
- "post": {
- "tags": [
- "KV"
- ],
- "summary": "Txn processes multiple requests in a single transaction.\nA txn request increments the revision of the key-value store\nand generates events with the same revision for every completed request.\nIt is not allowed to modify the same key several times within one txn.",
- "operationId": "KV_Txn",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbTxnRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbTxnResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/lease/grant": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseGrant creates a lease which expires if the server does not receive a keepAlive\nwithin a given time to live period. All keys attached to the lease will be expired and\ndeleted if the lease expires. Each expired key generates a delete event in the event history.",
- "operationId": "Lease_LeaseGrant",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseGrantRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseGrantResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/lease/keepalive": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client\nto the server and streaming keep alive responses from the server to the client.",
- "operationId": "Lease_LeaseKeepAlive",
- "parameters": [
- {
- "description": " (streaming inputs)",
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseKeepAliveRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.(streaming responses)",
- "schema": {
- "type": "object",
- "title": "Stream result of etcdserverpbLeaseKeepAliveResponse",
- "properties": {
- "error": {
- "$ref": "#/definitions/runtimeStreamError"
- },
- "result": {
- "$ref": "#/definitions/etcdserverpbLeaseKeepAliveResponse"
- }
- }
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/lease/leases": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseLeases lists all existing leases.",
- "operationId": "Lease_LeaseLeases",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseLeasesRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseLeasesResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/lease/revoke": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.",
- "operationId": "Lease_LeaseRevoke",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseRevokeRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/lease/timetolive": {
- "post": {
- "tags": [
- "Lease"
- ],
- "summary": "LeaseTimeToLive retrieves lease information.",
- "operationId": "Lease_LeaseTimeToLive",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/maintenance/alarm": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Alarm activates, deactivates, and queries alarms regarding cluster health.",
- "operationId": "Maintenance_Alarm",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbAlarmRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbAlarmResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/maintenance/defragment": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Defragment defragments a member's backend database to recover storage space.",
- "operationId": "Maintenance_Defragment",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbDefragmentRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbDefragmentResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/maintenance/downgrade": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Downgrade requests downgrades, verifies feasibility or cancels downgrade\non the cluster version.\nSupported since etcd 3.5.",
- "operationId": "Maintenance_Downgrade",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbDowngradeRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbDowngradeResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/maintenance/hash": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "HashKV computes the hash of all MVCC keys up to a given revision.\nIt only iterates \"key\" bucket in backend storage.",
- "operationId": "Maintenance_HashKV",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbHashKVRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbHashKVResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/maintenance/snapshot": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Snapshot sends a snapshot of the entire backend from a member over a stream to a client.",
- "operationId": "Maintenance_Snapshot",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbSnapshotRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.(streaming responses)",
- "schema": {
- "type": "object",
- "title": "Stream result of etcdserverpbSnapshotResponse",
- "properties": {
- "error": {
- "$ref": "#/definitions/runtimeStreamError"
- },
- "result": {
- "$ref": "#/definitions/etcdserverpbSnapshotResponse"
- }
- }
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/maintenance/status": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "Status gets the status of the member.",
- "operationId": "Maintenance_Status",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbStatusRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbStatusResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/maintenance/transfer-leadership": {
- "post": {
- "tags": [
- "Maintenance"
- ],
- "summary": "MoveLeader requests current leader node to transfer its leadership to transferee.",
- "operationId": "Maintenance_MoveLeader",
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbMoveLeaderRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/etcdserverpbMoveLeaderResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- },
- "/v3/watch": {
- "post": {
- "tags": [
- "Watch"
- ],
- "summary": "Watch watches for events happening or that have happened. Both input and output\nare streams; the input stream is for creating and canceling watchers and the output\nstream sends events. One watch RPC can watch on multiple key ranges, streaming events\nfor several watches at once. The entire event history can be watched starting from the\nlast compaction revision.",
- "operationId": "Watch_Watch",
- "parameters": [
- {
- "description": " (streaming inputs)",
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/etcdserverpbWatchRequest"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "A successful response.(streaming responses)",
- "schema": {
- "type": "object",
- "title": "Stream result of etcdserverpbWatchResponse",
- "properties": {
- "error": {
- "$ref": "#/definitions/runtimeStreamError"
- },
- "result": {
- "$ref": "#/definitions/etcdserverpbWatchResponse"
- }
- }
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- }
- }
- }
- },
- "definitions": {
- "AlarmRequestAlarmAction": {
- "type": "string",
- "default": "GET",
- "enum": [
- "GET",
- "ACTIVATE",
- "DEACTIVATE"
- ]
- },
- "CompareCompareResult": {
- "type": "string",
- "default": "EQUAL",
- "enum": [
- "EQUAL",
- "GREATER",
- "LESS",
- "NOT_EQUAL"
- ]
- },
- "CompareCompareTarget": {
- "type": "string",
- "default": "VERSION",
- "enum": [
- "VERSION",
- "CREATE",
- "MOD",
- "VALUE",
- "LEASE"
- ]
- },
- "DowngradeRequestDowngradeAction": {
- "type": "string",
- "default": "VALIDATE",
- "enum": [
- "VALIDATE",
- "ENABLE",
- "CANCEL"
- ]
- },
- "EventEventType": {
- "type": "string",
- "default": "PUT",
- "enum": [
- "PUT",
- "DELETE"
- ]
- },
- "RangeRequestSortOrder": {
- "type": "string",
- "default": "NONE",
- "enum": [
- "NONE",
- "ASCEND",
- "DESCEND"
- ]
- },
- "RangeRequestSortTarget": {
- "type": "string",
- "default": "KEY",
- "enum": [
- "KEY",
- "VERSION",
- "CREATE",
- "MOD",
- "VALUE"
- ]
- },
- "WatchCreateRequestFilterType": {
- "description": " - NOPUT: filter out put event.\n - NODELETE: filter out delete event.",
- "type": "string",
- "default": "NOPUT",
- "enum": [
- "NOPUT",
- "NODELETE"
- ]
- },
- "authpbPermission": {
- "type": "object",
- "title": "Permission is a single entity",
- "properties": {
- "key": {
- "type": "string",
- "format": "byte"
- },
- "permType": {
- "$ref": "#/definitions/authpbPermissionType"
- },
- "range_end": {
- "type": "string",
- "format": "byte"
- }
- }
- },
- "authpbPermissionType": {
- "type": "string",
- "default": "READ",
- "enum": [
- "READ",
- "WRITE",
- "READWRITE"
- ]
- },
- "authpbUserAddOptions": {
- "type": "object",
- "properties": {
- "no_password": {
- "type": "boolean"
- }
- }
- },
- "etcdserverpbAlarmMember": {
- "type": "object",
- "properties": {
- "alarm": {
- "description": "alarm is the type of alarm which has been raised.",
- "$ref": "#/definitions/etcdserverpbAlarmType"
- },
- "memberID": {
- "description": "memberID is the ID of the member associated with the raised alarm.",
- "type": "string",
- "format": "uint64"
- }
- }
- },
- "etcdserverpbAlarmRequest": {
- "type": "object",
- "properties": {
- "action": {
- "description": "action is the kind of alarm request to issue. The action\nmay GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a\nraised alarm.",
- "$ref": "#/definitions/AlarmRequestAlarmAction"
- },
- "alarm": {
- "description": "alarm is the type of alarm to consider for this request.",
- "$ref": "#/definitions/etcdserverpbAlarmType"
- },
- "memberID": {
- "description": "memberID is the ID of the member associated with the alarm. If memberID is 0, the\nalarm request covers all members.",
- "type": "string",
- "format": "uint64"
- }
- }
- },
- "etcdserverpbAlarmResponse": {
- "type": "object",
- "properties": {
- "alarms": {
- "description": "alarms is a list of alarms associated with the alarm request.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbAlarmMember"
- }
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAlarmType": {
- "type": "string",
- "default": "NONE",
- "enum": [
- "NONE",
- "NOSPACE",
- "CORRUPT"
- ]
- },
- "etcdserverpbAuthDisableRequest": {
- "type": "object"
- },
- "etcdserverpbAuthDisableResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthEnableRequest": {
- "type": "object"
- },
- "etcdserverpbAuthEnableResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthRoleAddRequest": {
- "type": "object",
- "properties": {
- "name": {
- "description": "name is the name of the role to add to the authentication system.",
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthRoleAddResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthRoleDeleteRequest": {
- "type": "object",
- "properties": {
- "role": {
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthRoleDeleteResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthRoleGetRequest": {
- "type": "object",
- "properties": {
- "role": {
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthRoleGetResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "perm": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/authpbPermission"
- }
- }
- }
- },
- "etcdserverpbAuthRoleGrantPermissionRequest": {
- "type": "object",
- "properties": {
- "name": {
- "description": "name is the name of the role which will be granted the permission.",
- "type": "string"
- },
- "perm": {
- "description": "perm is the permission to grant to the role.",
- "$ref": "#/definitions/authpbPermission"
- }
- }
- },
- "etcdserverpbAuthRoleGrantPermissionResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthRoleListRequest": {
- "type": "object"
- },
- "etcdserverpbAuthRoleListResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "roles": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "etcdserverpbAuthRoleRevokePermissionRequest": {
- "type": "object",
- "properties": {
- "key": {
- "type": "string",
- "format": "byte"
- },
- "range_end": {
- "type": "string",
- "format": "byte"
- },
- "role": {
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthRoleRevokePermissionResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthStatusRequest": {
- "type": "object"
- },
- "etcdserverpbAuthStatusResponse": {
- "type": "object",
- "properties": {
- "authRevision": {
- "type": "string",
- "format": "uint64",
- "title": "authRevision is the current revision of auth store"
- },
- "enabled": {
- "type": "boolean"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthUserAddRequest": {
- "type": "object",
- "properties": {
- "hashedPassword": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "options": {
- "$ref": "#/definitions/authpbUserAddOptions"
- },
- "password": {
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthUserAddResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthUserChangePasswordRequest": {
- "type": "object",
- "properties": {
- "hashedPassword": {
- "description": "hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.",
- "type": "string"
- },
- "name": {
- "description": "name is the name of the user whose password is being changed.",
- "type": "string"
- },
- "password": {
- "description": "password is the new password for the user. Note that this field will be removed in the API layer.",
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthUserChangePasswordResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthUserDeleteRequest": {
- "type": "object",
- "properties": {
- "name": {
- "description": "name is the name of the user to delete.",
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthUserDeleteResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthUserGetRequest": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthUserGetResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "roles": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "etcdserverpbAuthUserGrantRoleRequest": {
- "type": "object",
- "properties": {
- "role": {
- "description": "role is the name of the role to grant to the user.",
- "type": "string"
- },
- "user": {
- "description": "user is the name of the user which should be granted a given role.",
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthUserGrantRoleResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthUserListRequest": {
- "type": "object"
- },
- "etcdserverpbAuthUserListResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "users": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "etcdserverpbAuthUserRevokeRoleRequest": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "role": {
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthUserRevokeRoleResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbAuthenticateRequest": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "password": {
- "type": "string"
- }
- }
- },
- "etcdserverpbAuthenticateResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "token": {
- "type": "string",
- "title": "token is an authorized token that can be used in succeeding RPCs"
- }
- }
- },
- "etcdserverpbCompactionRequest": {
- "description": "CompactionRequest compacts the key-value store up to a given revision. All superseded keys\nwith a revision less than the compaction revision will be removed.",
- "type": "object",
- "properties": {
- "physical": {
- "description": "physical is set so the RPC will wait until the compaction is physically\napplied to the local database such that compacted entries are totally\nremoved from the backend database.",
- "type": "boolean"
- },
- "revision": {
- "description": "revision is the key-value store revision for the compaction operation.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbCompactionResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbCompare": {
- "type": "object",
- "properties": {
- "create_revision": {
- "type": "string",
- "format": "int64",
- "title": "create_revision is the creation revision of the given key"
- },
- "key": {
- "description": "key is the subject key for the comparison operation.",
- "type": "string",
- "format": "byte"
- },
- "lease": {
- "description": "lease is the lease id of the given key.",
- "type": "string",
- "format": "int64"
- },
- "mod_revision": {
- "description": "mod_revision is the last modified revision of the given key.",
- "type": "string",
- "format": "int64"
- },
- "range_end": {
- "description": "range_end compares the given target to all keys in the range [key, range_end).\nSee RangeRequest for more details on key ranges.",
- "type": "string",
- "format": "byte"
- },
- "result": {
- "description": "result is logical comparison operation for this comparison.",
- "$ref": "#/definitions/CompareCompareResult"
- },
- "target": {
- "description": "target is the key-value field to inspect for the comparison.",
- "$ref": "#/definitions/CompareCompareTarget"
- },
- "value": {
- "description": "value is the value of the given key, in bytes.",
- "type": "string",
- "format": "byte"
- },
- "version": {
- "type": "string",
- "format": "int64",
- "title": "version is the version of the given key"
- }
- }
- },
- "etcdserverpbDefragmentRequest": {
- "type": "object"
- },
- "etcdserverpbDefragmentResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbDeleteRangeRequest": {
- "type": "object",
- "properties": {
- "key": {
- "description": "key is the first key to delete in the range.",
- "type": "string",
- "format": "byte"
- },
- "prev_kv": {
- "description": "If prev_kv is set, etcd gets the previous key-value pairs before deleting it.\nThe previous key-value pairs will be returned in the delete response.",
- "type": "boolean"
- },
- "range_end": {
- "description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is one bit larger than the given key, then the range is all the keys\nwith the prefix (the given key).\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument.",
- "type": "string",
- "format": "byte"
- }
- }
- },
- "etcdserverpbDeleteRangeResponse": {
- "type": "object",
- "properties": {
- "deleted": {
- "description": "deleted is the number of keys deleted by the delete range request.",
- "type": "string",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "prev_kvs": {
- "description": "if prev_kv is set in the request, the previous key-value pairs will be returned.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/mvccpbKeyValue"
- }
- }
- }
- },
- "etcdserverpbDowngradeRequest": {
- "type": "object",
- "properties": {
- "action": {
- "description": "action is the kind of downgrade request to issue. The action may\nVALIDATE the target version, DOWNGRADE the cluster version,\nor CANCEL the current downgrading job.",
- "$ref": "#/definitions/DowngradeRequestDowngradeAction"
- },
- "version": {
- "description": "version is the target version to downgrade.",
- "type": "string"
- }
- }
- },
- "etcdserverpbDowngradeResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "version": {
- "description": "version is the current cluster version.",
- "type": "string"
- }
- }
- },
- "etcdserverpbHashKVRequest": {
- "type": "object",
- "properties": {
- "revision": {
- "description": "revision is the key-value store revision for the hash operation.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbHashKVResponse": {
- "type": "object",
- "properties": {
- "compact_revision": {
- "description": "compact_revision is the compacted revision of key-value store when hash begins.",
- "type": "string",
- "format": "int64"
- },
- "hash": {
- "description": "hash is the hash value computed from the responding member's MVCC keys up to a given revision.",
- "type": "integer",
- "format": "int64"
- },
- "hash_revision": {
- "description": "hash_revision is the revision up to which the hash is calculated.",
- "type": "string",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbHashRequest": {
- "type": "object"
- },
- "etcdserverpbHashResponse": {
- "type": "object",
- "properties": {
- "hash": {
- "description": "hash is the hash value computed from the responding member's KV's backend.",
- "type": "integer",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbLeaseGrantRequest": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.",
- "type": "string",
- "format": "int64"
- },
- "TTL": {
- "description": "TTL is the advisory time-to-live in seconds. Expired lease will return -1.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbLeaseGrantResponse": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the lease ID for the granted lease.",
- "type": "string",
- "format": "int64"
- },
- "TTL": {
- "description": "TTL is the server chosen lease time-to-live in seconds.",
- "type": "string",
- "format": "int64"
- },
- "error": {
- "type": "string"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbLeaseKeepAliveRequest": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the lease ID for the lease to keep alive.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbLeaseKeepAliveResponse": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the lease ID from the keep alive request.",
- "type": "string",
- "format": "int64"
- },
- "TTL": {
- "description": "TTL is the new time-to-live for the lease.",
- "type": "string",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbLeaseLeasesRequest": {
- "type": "object"
- },
- "etcdserverpbLeaseLeasesResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "leases": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbLeaseStatus"
- }
- }
- }
- },
- "etcdserverpbLeaseRevokeRequest": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbLeaseRevokeResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbLeaseStatus": {
- "type": "object",
- "properties": {
- "ID": {
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbLeaseTimeToLiveRequest": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the lease ID for the lease.",
- "type": "string",
- "format": "int64"
- },
- "keys": {
- "description": "keys is true to query all the keys attached to this lease.",
- "type": "boolean"
- }
- }
- },
- "etcdserverpbLeaseTimeToLiveResponse": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the lease ID from the keep alive request.",
- "type": "string",
- "format": "int64"
- },
- "TTL": {
- "description": "TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.",
- "type": "string",
- "format": "int64"
- },
- "grantedTTL": {
- "description": "GrantedTTL is the initial granted time in seconds upon lease creation/renewal.",
- "type": "string",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "keys": {
- "description": "Keys is the list of keys attached to this lease.",
- "type": "array",
- "items": {
- "type": "string",
- "format": "byte"
- }
- }
- }
- },
- "etcdserverpbMember": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the member ID for this member.",
- "type": "string",
- "format": "uint64"
- },
- "clientURLs": {
- "description": "clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "isLearner": {
- "description": "isLearner indicates if the member is raft learner.",
- "type": "boolean"
- },
- "name": {
- "description": "name is the human-readable name of the member. If the member is not started, the name will be an empty string.",
- "type": "string"
- },
- "peerURLs": {
- "description": "peerURLs is the list of URLs the member exposes to the cluster for communication.",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "etcdserverpbMemberAddRequest": {
- "type": "object",
- "properties": {
- "isLearner": {
- "description": "isLearner indicates if the added member is raft learner.",
- "type": "boolean"
- },
- "peerURLs": {
- "description": "peerURLs is the list of URLs the added member will use to communicate with the cluster.",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "etcdserverpbMemberAddResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "member": {
- "description": "member is the member information for the added member.",
- "$ref": "#/definitions/etcdserverpbMember"
- },
- "members": {
- "description": "members is a list of all members after adding the new member.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbMember"
- }
- }
- }
- },
- "etcdserverpbMemberListRequest": {
- "type": "object",
- "properties": {
- "linearizable": {
- "type": "boolean"
- }
- }
- },
- "etcdserverpbMemberListResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "members": {
- "description": "members is a list of all members associated with the cluster.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbMember"
- }
- }
- }
- },
- "etcdserverpbMemberPromoteRequest": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the member ID of the member to promote.",
- "type": "string",
- "format": "uint64"
- }
- }
- },
- "etcdserverpbMemberPromoteResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "members": {
- "description": "members is a list of all members after promoting the member.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbMember"
- }
- }
- }
- },
- "etcdserverpbMemberRemoveRequest": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the member ID of the member to remove.",
- "type": "string",
- "format": "uint64"
- }
- }
- },
- "etcdserverpbMemberRemoveResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "members": {
- "description": "members is a list of all members after removing the member.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbMember"
- }
- }
- }
- },
- "etcdserverpbMemberUpdateRequest": {
- "type": "object",
- "properties": {
- "ID": {
- "description": "ID is the member ID of the member to update.",
- "type": "string",
- "format": "uint64"
- },
- "peerURLs": {
- "description": "peerURLs is the new list of URLs the member will use to communicate with the cluster.",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "etcdserverpbMemberUpdateResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "members": {
- "description": "members is a list of all members after updating the member.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbMember"
- }
- }
- }
- },
- "etcdserverpbMoveLeaderRequest": {
- "type": "object",
- "properties": {
- "targetID": {
- "description": "targetID is the node ID for the new leader.",
- "type": "string",
- "format": "uint64"
- }
- }
- },
- "etcdserverpbMoveLeaderResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "etcdserverpbPutRequest": {
- "type": "object",
- "properties": {
- "ignore_lease": {
- "description": "If ignore_lease is set, etcd updates the key using its current lease.\nReturns an error if the key does not exist.",
- "type": "boolean"
- },
- "ignore_value": {
- "description": "If ignore_value is set, etcd updates the key using its current value.\nReturns an error if the key does not exist.",
- "type": "boolean"
- },
- "key": {
- "description": "key is the key, in bytes, to put into the key-value store.",
- "type": "string",
- "format": "byte"
- },
- "lease": {
- "description": "lease is the lease ID to associate with the key in the key-value store. A lease\nvalue of 0 indicates no lease.",
- "type": "string",
- "format": "int64"
- },
- "prev_kv": {
- "description": "If prev_kv is set, etcd gets the previous key-value pair before changing it.\nThe previous key-value pair will be returned in the put response.",
- "type": "boolean"
- },
- "value": {
- "description": "value is the value, in bytes, to associate with the key in the key-value store.",
- "type": "string",
- "format": "byte"
- }
- }
- },
- "etcdserverpbPutResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "prev_kv": {
- "description": "if prev_kv is set in the request, the previous key-value pair will be returned.",
- "$ref": "#/definitions/mvccpbKeyValue"
- }
- }
- },
- "etcdserverpbRangeRequest": {
- "type": "object",
- "properties": {
- "count_only": {
- "description": "count_only when set returns only the count of the keys in the range.",
- "type": "boolean"
- },
- "key": {
- "description": "key is the first key for the range. If range_end is not given, the request only looks up key.",
- "type": "string",
- "format": "byte"
- },
- "keys_only": {
- "description": "keys_only when set returns only the keys and not the values.",
- "type": "boolean"
- },
- "limit": {
- "description": "limit is a limit on the number of keys returned for the request. When limit is set to 0,\nit is treated as no limit.",
- "type": "string",
- "format": "int64"
- },
- "max_create_revision": {
- "description": "max_create_revision is the upper bound for returned key create revisions; all keys with\ngreater create revisions will be filtered away.",
- "type": "string",
- "format": "int64"
- },
- "max_mod_revision": {
- "description": "max_mod_revision is the upper bound for returned key mod revisions; all keys with\ngreater mod revisions will be filtered away.",
- "type": "string",
- "format": "int64"
- },
- "min_create_revision": {
- "description": "min_create_revision is the lower bound for returned key create revisions; all keys with\nlesser create revisions will be filtered away.",
- "type": "string",
- "format": "int64"
- },
- "min_mod_revision": {
- "description": "min_mod_revision is the lower bound for returned key mod revisions; all keys with\nlesser mod revisions will be filtered away.",
- "type": "string",
- "format": "int64"
- },
- "range_end": {
- "description": "range_end is the upper bound on the requested range [key, range_end).\nIf range_end is '\\0', the range is all keys \u003e= key.\nIf range_end is key plus one (e.g., \"aa\"+1 == \"ab\", \"a\\xff\"+1 == \"b\"),\nthen the range request gets all keys prefixed with key.\nIf both key and range_end are '\\0', then the range request returns all keys.",
- "type": "string",
- "format": "byte"
- },
- "revision": {
- "description": "revision is the point-in-time of the key-value store to use for the range.\nIf revision is less or equal to zero, the range is over the newest key-value store.\nIf the revision has been compacted, ErrCompacted is returned as a response.",
- "type": "string",
- "format": "int64"
- },
- "serializable": {
- "description": "serializable sets the range request to use serializable member-local reads.\nRange requests are linearizable by default; linearizable requests have higher\nlatency and lower throughput than serializable requests but reflect the current\nconsensus of the cluster. For better performance, in exchange for possible stale reads,\na serializable range request is served locally without needing to reach consensus\nwith other nodes in the cluster.",
- "type": "boolean"
- },
- "sort_order": {
- "description": "sort_order is the order for returned sorted results.",
- "$ref": "#/definitions/RangeRequestSortOrder"
- },
- "sort_target": {
- "description": "sort_target is the key-value field to use for sorting.",
- "$ref": "#/definitions/RangeRequestSortTarget"
- }
- }
- },
- "etcdserverpbRangeResponse": {
- "type": "object",
- "properties": {
- "count": {
- "description": "count is set to the number of keys within the range when requested.",
- "type": "string",
- "format": "int64"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "kvs": {
- "description": "kvs is the list of key-value pairs matched by the range request.\nkvs is empty when count is requested.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/mvccpbKeyValue"
- }
- },
- "more": {
- "description": "more indicates if there are more keys to return in the requested range.",
- "type": "boolean"
- }
- }
- },
- "etcdserverpbRequestOp": {
- "type": "object",
- "properties": {
- "request_delete_range": {
- "$ref": "#/definitions/etcdserverpbDeleteRangeRequest"
- },
- "request_put": {
- "$ref": "#/definitions/etcdserverpbPutRequest"
- },
- "request_range": {
- "$ref": "#/definitions/etcdserverpbRangeRequest"
- },
- "request_txn": {
- "$ref": "#/definitions/etcdserverpbTxnRequest"
- }
- }
- },
- "etcdserverpbResponseHeader": {
- "type": "object",
- "properties": {
- "cluster_id": {
- "description": "cluster_id is the ID of the cluster which sent the response.",
- "type": "string",
- "format": "uint64"
- },
- "member_id": {
- "description": "member_id is the ID of the member which sent the response.",
- "type": "string",
- "format": "uint64"
- },
- "raft_term": {
- "description": "raft_term is the raft term when the request was applied.",
- "type": "string",
- "format": "uint64"
- },
- "revision": {
- "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbResponseOp": {
- "type": "object",
- "properties": {
- "response_delete_range": {
- "$ref": "#/definitions/etcdserverpbDeleteRangeResponse"
- },
- "response_put": {
- "$ref": "#/definitions/etcdserverpbPutResponse"
- },
- "response_range": {
- "$ref": "#/definitions/etcdserverpbRangeResponse"
- },
- "response_txn": {
- "$ref": "#/definitions/etcdserverpbTxnResponse"
- }
- }
- },
- "etcdserverpbSnapshotRequest": {
- "type": "object"
- },
- "etcdserverpbSnapshotResponse": {
- "type": "object",
- "properties": {
- "blob": {
- "description": "blob contains the next chunk of the snapshot in the snapshot stream.",
- "type": "string",
- "format": "byte"
- },
- "header": {
- "description": "header has the current key-value store information. The first header in the snapshot\nstream indicates the point in time of the snapshot.",
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "remaining_bytes": {
- "type": "string",
- "format": "uint64",
- "title": "remaining_bytes is the number of blob bytes to be sent after this message"
- },
- "version": {
- "description": "local version of server that created the snapshot.\nIn cluster with binaries with different version, each cluster can return different result.\nInforms which etcd server version should be used when restoring the snapshot.",
- "type": "string"
- }
- }
- },
- "etcdserverpbStatusRequest": {
- "type": "object"
- },
- "etcdserverpbStatusResponse": {
- "type": "object",
- "properties": {
- "dbSize": {
- "description": "dbSize is the size of the backend database physically allocated, in bytes, of the responding member.",
- "type": "string",
- "format": "int64"
- },
- "dbSizeInUse": {
- "description": "dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.",
- "type": "string",
- "format": "int64"
- },
- "errors": {
- "description": "errors contains alarm/health information and status.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "isLearner": {
- "description": "isLearner indicates if the member is raft learner.",
- "type": "boolean"
- },
- "leader": {
- "description": "leader is the member ID which the responding member believes is the current leader.",
- "type": "string",
- "format": "uint64"
- },
- "raftAppliedIndex": {
- "description": "raftAppliedIndex is the current raft applied index of the responding member.",
- "type": "string",
- "format": "uint64"
- },
- "raftIndex": {
- "description": "raftIndex is the current raft committed index of the responding member.",
- "type": "string",
- "format": "uint64"
- },
- "raftTerm": {
- "description": "raftTerm is the current raft term of the responding member.",
- "type": "string",
- "format": "uint64"
- },
- "storageVersion": {
- "description": "storageVersion is the version of the db file. It might be get updated with delay in relationship to the target cluster version.",
- "type": "string"
- },
- "version": {
- "description": "version is the cluster protocol version used by the responding member.",
- "type": "string"
- }
- }
- },
- "etcdserverpbTxnRequest": {
- "description": "From google paxosdb paper:\nOur implementation hinges around a powerful primitive which we call MultiOp. All other database\noperations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically\nand consists of three components:\n1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check\nfor the absence or presence of a value, or compare with a given value. Two different tests in the guard\nmay apply to the same or different entries in the database. All tests in the guard are applied and\nMultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise\nit executes f op (see item 3 below).\n2. A list of database operations called t op. Each operation in the list is either an insert, delete, or\nlookup operation, and applies to a single database entry. Two different operations in the list may apply\nto the same or different entries in the database. These operations are executed\nif guard evaluates to\ntrue.\n3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.",
- "type": "object",
- "properties": {
- "compare": {
- "description": "compare is a list of predicates representing a conjunction of terms.\nIf the comparisons succeed, then the success requests will be processed in order,\nand the response will contain their respective responses in order.\nIf the comparisons fail, then the failure requests will be processed in order,\nand the response will contain their respective responses in order.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbCompare"
- }
- },
- "failure": {
- "description": "failure is a list of requests which will be applied when compare evaluates to false.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbRequestOp"
- }
- },
- "success": {
- "description": "success is a list of requests which will be applied when compare evaluates to true.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbRequestOp"
- }
- }
- }
- },
- "etcdserverpbTxnResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "responses": {
- "description": "responses is a list of responses corresponding to the results from applying\nsuccess if succeeded is true or failure if succeeded is false.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/etcdserverpbResponseOp"
- }
- },
- "succeeded": {
- "description": "succeeded is set to true if the compare evaluated to true or false otherwise.",
- "type": "boolean"
- }
- }
- },
- "etcdserverpbWatchCancelRequest": {
- "type": "object",
- "properties": {
- "watch_id": {
- "description": "watch_id is the watcher id to cancel so that no more events are transmitted.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbWatchCreateRequest": {
- "type": "object",
- "properties": {
- "filters": {
- "description": "filters filter the events at server side before it sends back to the watcher.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/WatchCreateRequestFilterType"
- }
- },
- "fragment": {
- "description": "fragment enables splitting large revisions into multiple watch responses.",
- "type": "boolean"
- },
- "key": {
- "description": "key is the key to register for watching.",
- "type": "string",
- "format": "byte"
- },
- "prev_kv": {
- "description": "If prev_kv is set, created watcher gets the previous KV before the event happens.\nIf the previous KV is already compacted, nothing will be returned.",
- "type": "boolean"
- },
- "progress_notify": {
- "description": "progress_notify is set so that the etcd server will periodically send a WatchResponse with\nno events to the new watcher if there are no recent events. It is useful when clients\nwish to recover a disconnected watcher starting from a recent known revision.\nThe etcd server may decide how often it will send notifications based on current load.",
- "type": "boolean"
- },
- "range_end": {
- "description": "range_end is the end of the range [key, range_end) to watch. If range_end is not given,\nonly the key argument is watched. If range_end is equal to '\\0', all keys greater than\nor equal to the key argument are watched.\nIf the range_end is one bit larger than the given key,\nthen all keys with the prefix (the given key) will be watched.",
- "type": "string",
- "format": "byte"
- },
- "start_revision": {
- "description": "start_revision is an optional revision to watch from (inclusive). No start_revision is \"now\".",
- "type": "string",
- "format": "int64"
- },
- "watch_id": {
- "description": "If watch_id is provided and non-zero, it will be assigned to this watcher.\nSince creating a watcher in etcd is not a synchronous operation,\nthis can be used ensure that ordering is correct when creating multiple\nwatchers on the same stream. Creating a watcher with an ID already in\nuse on the stream will cause an error to be returned.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "etcdserverpbWatchProgressRequest": {
- "description": "Requests the a watch stream progress status be sent in the watch response stream as soon as\npossible.",
- "type": "object"
- },
- "etcdserverpbWatchRequest": {
- "type": "object",
- "properties": {
- "cancel_request": {
- "$ref": "#/definitions/etcdserverpbWatchCancelRequest"
- },
- "create_request": {
- "$ref": "#/definitions/etcdserverpbWatchCreateRequest"
- },
- "progress_request": {
- "$ref": "#/definitions/etcdserverpbWatchProgressRequest"
- }
- }
- },
- "etcdserverpbWatchResponse": {
- "type": "object",
- "properties": {
- "cancel_reason": {
- "description": "cancel_reason indicates the reason for canceling the watcher.",
- "type": "string"
- },
- "canceled": {
- "description": "canceled is set to true if the response is for a cancel watch request.\nNo further events will be sent to the canceled watcher.",
- "type": "boolean"
- },
- "compact_revision": {
- "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store.\n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again.",
- "type": "string",
- "format": "int64"
- },
- "created": {
- "description": "created is set to true if the response is for a create watch request.\nThe client should record the watch_id and expect to receive events for\nthe created watcher from the same stream.\nAll events sent to the created watcher will attach with the same watch_id.",
- "type": "boolean"
- },
- "events": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/mvccpbEvent"
- }
- },
- "fragment": {
- "description": "framgment is true if large watch response was split over multiple responses.",
- "type": "boolean"
- },
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "watch_id": {
- "description": "watch_id is the ID of the watcher that corresponds to the response.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "mvccpbEvent": {
- "type": "object",
- "properties": {
- "kv": {
- "description": "kv holds the KeyValue for the event.\nA PUT event contains current kv pair.\nA PUT event with kv.Version=1 indicates the creation of a key.\nA DELETE/EXPIRE event contains the deleted key with\nits modification revision set to the revision of deletion.",
- "$ref": "#/definitions/mvccpbKeyValue"
- },
- "prev_kv": {
- "description": "prev_kv holds the key-value pair before the event happens.",
- "$ref": "#/definitions/mvccpbKeyValue"
- },
- "type": {
- "description": "type is the kind of event. If type is a PUT, it indicates\nnew data has been stored to the key. If type is a DELETE,\nit indicates the key was deleted.",
- "$ref": "#/definitions/EventEventType"
- }
- }
- },
- "mvccpbKeyValue": {
- "type": "object",
- "properties": {
- "create_revision": {
- "description": "create_revision is the revision of last creation on this key.",
- "type": "string",
- "format": "int64"
- },
- "key": {
- "description": "key is the key in bytes. An empty key is not allowed.",
- "type": "string",
- "format": "byte"
- },
- "lease": {
- "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key.",
- "type": "string",
- "format": "int64"
- },
- "mod_revision": {
- "description": "mod_revision is the revision of last modification on this key.",
- "type": "string",
- "format": "int64"
- },
- "value": {
- "description": "value is the value held by the key, in bytes.",
- "type": "string",
- "format": "byte"
- },
- "version": {
- "description": "version is the version of the key. A deletion resets\nthe version to zero and any modification of the key\nincreases its version.",
- "type": "string",
- "format": "int64"
- }
- }
- },
- "protobufAny": {
- "type": "object",
- "properties": {
- "type_url": {
- "type": "string"
- },
- "value": {
- "type": "string",
- "format": "byte"
- }
- }
- },
- "runtimeError": {
- "type": "object",
- "properties": {
- "code": {
- "type": "integer",
- "format": "int32"
- },
- "details": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/protobufAny"
- }
- },
- "error": {
- "type": "string"
- },
- "message": {
- "type": "string"
- }
- }
- },
- "runtimeStreamError": {
- "type": "object",
- "properties": {
- "details": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/protobufAny"
- }
- },
- "grpc_code": {
- "type": "integer",
- "format": "int32"
- },
- "http_code": {
- "type": "integer",
- "format": "int32"
- },
- "http_status": {
- "type": "string"
- },
- "message": {
- "type": "string"
- }
- }
- }
- },
- "securityDefinitions": {
- "ApiKey": {
- "type": "apiKey",
- "name": "Authorization",
- "in": "header"
- }
- },
- "security": [
- {
- "ApiKey": []
- }
- ]
-}
\ No newline at end of file
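
For context on the spec removed above: the gRPC-gateway endpoints it documents (KV_Put at /v3/kv/put, KV_Range at /v3/kv/range, and so on) accept JSON request bodies over plain HTTP, with every "format": "byte" field carried as a base64-encoded string. The following is a minimal, illustrative Go sketch of that request shape, not part of the deleted files; the gateway address http://127.0.0.1:2379 is an assumption.

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// post sends a JSON body to one of the /v3/* gateway paths and decodes the reply.
func post(path string, body any) (map[string]any, error) {
	buf, err := json.Marshal(body)
	if err != nil {
		return nil, err
	}
	// The address is an assumption; adjust to wherever the JSON gateway listens.
	resp, err := http.Post("http://127.0.0.1:2379"+path, "application/json", bytes.NewReader(buf))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var out map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	b64 := base64.StdEncoding.EncodeToString

	// KV_Put: body follows etcdserverpbPutRequest; key/value are base64 ("format": "byte").
	if _, err := post("/v3/kv/put", map[string]string{
		"key":   b64([]byte("foo")),
		"value": b64([]byte("bar")),
	}); err != nil {
		panic(err)
	}

	// KV_Range: body follows etcdserverpbRangeRequest; the reply's "kvs" holds matched pairs.
	out, err := post("/v3/kv/range", map[string]string{"key": b64([]byte("foo"))})
	if err != nil {
		panic(err)
	}
	fmt.Println(out["kvs"])
}
```

The same request shape applies to the lease and maintenance paths above; the streaming endpoints (/v3/watch, /v3/lease/keepalive, /v3/maintenance/snapshot) keep the connection open and emit a sequence of {"result": ...} objects rather than a single reply, as the "Stream result of ..." schemas in the spec indicate.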
diff --git a/Documentation/dev-guide/apispec/swagger/v3election.swagger.json b/Documentation/dev-guide/apispec/swagger/v3election.swagger.json
deleted file mode 100644
index 7238a44e792..00000000000
--- a/Documentation/dev-guide/apispec/swagger/v3election.swagger.json
+++ /dev/null
@@ -1,427 +0,0 @@
-{
- "swagger": "2.0",
- "info": {
- "title": "server/etcdserver/api/v3election/v3electionpb/v3election.proto",
- "version": "version not set"
- },
- "consumes": [
- "application/json"
- ],
- "produces": [
- "application/json"
- ],
- "paths": {
- "/v3/election/campaign": {
- "post": {
- "summary": "Campaign waits to acquire leadership in an election, returning a LeaderKey\nrepresenting the leadership if successful. The LeaderKey can then be used\nto issue new values on the election, transactionally guard API requests on\nleadership still being held, and resign from the election.",
- "operationId": "Election_Campaign",
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/v3electionpbCampaignResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- },
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/v3electionpbCampaignRequest"
- }
- }
- ],
- "tags": [
- "Election"
- ]
- }
- },
- "/v3/election/leader": {
- "post": {
- "summary": "Leader returns the current election proclamation, if any.",
- "operationId": "Election_Leader",
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/v3electionpbLeaderResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- },
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/v3electionpbLeaderRequest"
- }
- }
- ],
- "tags": [
- "Election"
- ]
- }
- },
- "/v3/election/observe": {
- "post": {
- "summary": "Observe streams election proclamations in-order as made by the election's\nelected leaders.",
- "operationId": "Election_Observe",
- "responses": {
- "200": {
- "description": "A successful response.(streaming responses)",
- "schema": {
- "type": "object",
- "properties": {
- "result": {
- "$ref": "#/definitions/v3electionpbLeaderResponse"
- },
- "error": {
- "$ref": "#/definitions/runtimeStreamError"
- }
- },
- "title": "Stream result of v3electionpbLeaderResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- },
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/v3electionpbLeaderRequest"
- }
- }
- ],
- "tags": [
- "Election"
- ]
- }
- },
- "/v3/election/proclaim": {
- "post": {
- "summary": "Proclaim updates the leader's posted value with a new value.",
- "operationId": "Election_Proclaim",
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/v3electionpbProclaimResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- },
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/v3electionpbProclaimRequest"
- }
- }
- ],
- "tags": [
- "Election"
- ]
- }
- },
- "/v3/election/resign": {
- "post": {
- "summary": "Resign releases election leadership so other campaigners may acquire\nleadership on the election.",
- "operationId": "Election_Resign",
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/v3electionpbResignResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- },
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/v3electionpbResignRequest"
- }
- }
- ],
- "tags": [
- "Election"
- ]
- }
- }
- },
- "definitions": {
- "etcdserverpbResponseHeader": {
- "type": "object",
- "properties": {
- "cluster_id": {
- "type": "string",
- "format": "uint64",
- "description": "cluster_id is the ID of the cluster which sent the response."
- },
- "member_id": {
- "type": "string",
- "format": "uint64",
- "description": "member_id is the ID of the member which sent the response."
- },
- "revision": {
- "type": "string",
- "format": "int64",
- "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number."
- },
- "raft_term": {
- "type": "string",
- "format": "uint64",
- "description": "raft_term is the raft term when the request was applied."
- }
- }
- },
- "mvccpbKeyValue": {
- "type": "object",
- "properties": {
- "key": {
- "type": "string",
- "format": "byte",
- "description": "key is the key in bytes. An empty key is not allowed."
- },
- "create_revision": {
- "type": "string",
- "format": "int64",
- "description": "create_revision is the revision of last creation on this key."
- },
- "mod_revision": {
- "type": "string",
- "format": "int64",
- "description": "mod_revision is the revision of last modification on this key."
- },
- "version": {
- "type": "string",
- "format": "int64",
- "description": "version is the version of the key. A deletion resets\nthe version to zero and any modification of the key\nincreases its version."
- },
- "value": {
- "type": "string",
- "format": "byte",
- "description": "value is the value held by the key, in bytes."
- },
- "lease": {
- "type": "string",
- "format": "int64",
- "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key."
- }
- }
- },
- "protobufAny": {
- "type": "object",
- "properties": {
- "type_url": {
- "type": "string"
- },
- "value": {
- "type": "string",
- "format": "byte"
- }
- }
- },
- "runtimeError": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- },
- "code": {
- "type": "integer",
- "format": "int32"
- },
- "message": {
- "type": "string"
- },
- "details": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/protobufAny"
- }
- }
- }
- },
- "runtimeStreamError": {
- "type": "object",
- "properties": {
- "grpc_code": {
- "type": "integer",
- "format": "int32"
- },
- "http_code": {
- "type": "integer",
- "format": "int32"
- },
- "message": {
- "type": "string"
- },
- "http_status": {
- "type": "string"
- },
- "details": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/protobufAny"
- }
- }
- }
- },
- "v3electionpbCampaignRequest": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "format": "byte",
- "description": "name is the election's identifier for the campaign."
- },
- "lease": {
- "type": "string",
- "format": "int64",
- "description": "lease is the ID of the lease attached to leadership of the election. If the\nlease expires or is revoked before resigning leadership, then the\nleadership is transferred to the next campaigner, if any."
- },
- "value": {
- "type": "string",
- "format": "byte",
- "description": "value is the initial proclaimed value set when the campaigner wins the\nelection."
- }
- }
- },
- "v3electionpbCampaignResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "leader": {
- "$ref": "#/definitions/v3electionpbLeaderKey",
-          "description": "leader describes the resources used for holding leadership of the election."
- }
- }
- },
- "v3electionpbLeaderKey": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "format": "byte",
-          "description": "name is the election identifier that corresponds to the leadership key."
- },
- "key": {
- "type": "string",
- "format": "byte",
- "description": "key is an opaque key representing the ownership of the election. If the key\nis deleted, then leadership is lost."
- },
- "rev": {
- "type": "string",
- "format": "int64",
- "description": "rev is the creation revision of the key. It can be used to test for ownership\nof an election during transactions by testing the key's creation revision\nmatches rev."
- },
- "lease": {
- "type": "string",
- "format": "int64",
- "description": "lease is the lease ID of the election leader."
- }
- }
- },
- "v3electionpbLeaderRequest": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "format": "byte",
- "description": "name is the election identifier for the leadership information."
- }
- }
- },
- "v3electionpbLeaderResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "kv": {
- "$ref": "#/definitions/mvccpbKeyValue",
- "description": "kv is the key-value pair representing the latest leader update."
- }
- }
- },
- "v3electionpbProclaimRequest": {
- "type": "object",
- "properties": {
- "leader": {
- "$ref": "#/definitions/v3electionpbLeaderKey",
- "description": "leader is the leadership hold on the election."
- },
- "value": {
- "type": "string",
- "format": "byte",
- "description": "value is an update meant to overwrite the leader's current value."
- }
- }
- },
- "v3electionpbProclaimResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- },
- "v3electionpbResignRequest": {
- "type": "object",
- "properties": {
- "leader": {
- "$ref": "#/definitions/v3electionpbLeaderKey",
- "description": "leader is the leadership to relinquish by resignation."
- }
- }
- },
- "v3electionpbResignResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- }
- }
-}
diff --git a/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json b/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json
deleted file mode 100644
index 5a45bdd9b2a..00000000000
--- a/Documentation/dev-guide/apispec/swagger/v3lock.swagger.json
+++ /dev/null
@@ -1,187 +0,0 @@
-{
- "swagger": "2.0",
- "info": {
- "title": "server/etcdserver/api/v3lock/v3lockpb/v3lock.proto",
- "version": "version not set"
- },
- "consumes": [
- "application/json"
- ],
- "produces": [
- "application/json"
- ],
- "paths": {
- "/v3/lock/lock": {
- "post": {
- "summary": "Lock acquires a distributed shared lock on a given named lock.\nOn success, it will return a unique key that exists so long as the\nlock is held by the caller. This key can be used in conjunction with\ntransactions to safely ensure updates to etcd only occur while holding\nlock ownership. The lock is held until Unlock is called on the key or the\nlease associate with the owner expires.",
- "operationId": "Lock_Lock",
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/v3lockpbLockResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- },
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/v3lockpbLockRequest"
- }
- }
- ],
- "tags": [
- "Lock"
- ]
- }
- },
- "/v3/lock/unlock": {
- "post": {
- "summary": "Unlock takes a key returned by Lock and releases the hold on lock. The\nnext Lock caller waiting for the lock will then be woken up and given\nownership of the lock.",
- "operationId": "Lock_Unlock",
- "responses": {
- "200": {
- "description": "A successful response.",
- "schema": {
- "$ref": "#/definitions/v3lockpbUnlockResponse"
- }
- },
- "default": {
- "description": "An unexpected error response.",
- "schema": {
- "$ref": "#/definitions/runtimeError"
- }
- }
- },
- "parameters": [
- {
- "name": "body",
- "in": "body",
- "required": true,
- "schema": {
- "$ref": "#/definitions/v3lockpbUnlockRequest"
- }
- }
- ],
- "tags": [
- "Lock"
- ]
- }
- }
- },
- "definitions": {
- "etcdserverpbResponseHeader": {
- "type": "object",
- "properties": {
- "cluster_id": {
- "type": "string",
- "format": "uint64",
- "description": "cluster_id is the ID of the cluster which sent the response."
- },
- "member_id": {
- "type": "string",
- "format": "uint64",
- "description": "member_id is the ID of the member which sent the response."
- },
- "revision": {
- "type": "string",
- "format": "int64",
- "description": "revision is the key-value store revision when the request was applied, and it's\nunset (so 0) in case of calls not interacting with key-value store.\nFor watch progress responses, the header.revision indicates progress. All future events\nreceived in this stream are guaranteed to have a higher revision number than the\nheader.revision number."
- },
- "raft_term": {
- "type": "string",
- "format": "uint64",
- "description": "raft_term is the raft term when the request was applied."
- }
- }
- },
- "protobufAny": {
- "type": "object",
- "properties": {
- "type_url": {
- "type": "string"
- },
- "value": {
- "type": "string",
- "format": "byte"
- }
- }
- },
- "runtimeError": {
- "type": "object",
- "properties": {
- "error": {
- "type": "string"
- },
- "code": {
- "type": "integer",
- "format": "int32"
- },
- "message": {
- "type": "string"
- },
- "details": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/protobufAny"
- }
- }
- }
- },
- "v3lockpbLockRequest": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "format": "byte",
- "description": "name is the identifier for the distributed shared lock to be acquired."
- },
- "lease": {
- "type": "string",
- "format": "int64",
- "description": "lease is the ID of the lease that will be attached to ownership of the\nlock. If the lease expires or is revoked and currently holds the lock,\nthe lock is automatically released. Calls to Lock with the same lease will\nbe treated as a single acquisition; locking twice with the same lease is a\nno-op."
- }
- }
- },
- "v3lockpbLockResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- },
- "key": {
- "type": "string",
- "format": "byte",
- "description": "key is a key that will exist on etcd for the duration that the Lock caller\nowns the lock. Users should not modify this key or the lock may exhibit\nundefined behavior."
- }
- }
- },
- "v3lockpbUnlockRequest": {
- "type": "object",
- "properties": {
- "key": {
- "type": "string",
- "format": "byte",
- "description": "key is the lock ownership key granted by Lock."
- }
- }
- },
- "v3lockpbUnlockResponse": {
- "type": "object",
- "properties": {
- "header": {
- "$ref": "#/definitions/etcdserverpbResponseHeader"
- }
- }
- }
- }
-}
diff --git a/Documentation/postmortems/v3.5-data-inconsistency.md b/Documentation/postmortems/v3.5-data-inconsistency.md
deleted file mode 100644
index 718097657b4..00000000000
--- a/Documentation/postmortems/v3.5-data-inconsistency.md
+++ /dev/null
@@ -1,142 +0,0 @@
-# v3.5 data inconsistency postmortem
-
-| | |
-|---------|------------|
-| Authors | serathius@ |
-| Date | 2022-04-20 |
-| Status | published |
-
-## Summary
-
-| | |
-|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Summary | A code refactor in v3.5.0 resulted in the consistent index not being saved atomically. An ill-timed crash could lead to committed transactions not being reflected on all members. |
-| Impact  | No user reported problems in production, as triggering the issue required frequent crashes; however, the issue was critical enough to motivate a public statement. The main impact comes from losing user trust in etcd's reliability. |
-
-## Background
-
-etcd v3 state is preserved on disk in two forms: the write-ahead log (WAL) and the database state (DB).
-etcd v3.5 also still maintains v2 state, but it is deprecated and not relevant to the issue in this postmortem.
-
-The WAL stores the history of changes to etcd state, while the database represents the state at one point in time.
-To know which point in history the database represents, it stores a consistent index (CI).
-The CI is a special metadata field that points to the last WAL entry the database has seen.
-
-When etcd updates the database state, it replays entries from the WAL and updates the consistent index to point to the new entry.
-This operation is required to be [atomic](https://en.wikipedia.org/wiki/Atomic_commit).
-A partial failure would mean that the database and WAL no longer match, so some entries would either be skipped (if only the CI is updated) or executed twice (if only the changes are applied).
-This is especially important for a distributed system like etcd, where there are multiple cluster members, each applying the WAL entries to their database.
-Correctness of the system depends on the assumption that every member of the cluster, while replaying WAL entries, will reach the same state.
-
-## Root cause
-
-To simplify managing the consistent index, etcd introduced backend hooks in https://github.com/etcd-io/etcd/pull/12855.
-The goal was to ensure that the consistent index is always updated, by automatically triggering the update during commit.
-The implementation was as follows: before applying WAL entries, etcd updated the in-memory value of the consistent index.
-As part of the transaction commit process, a database hook would read the value of the consistent index and store it in the database.
-
-The problem is that the in-memory value of the consistent index is shared, and there may be other in-flight transactions apart from the serial WAL apply flow.
-Consider the following scenario:
-1. The etcd server starts an apply workflow and has just set a new consistent index value.
-2. The periodic commit is triggered; it executes the backend hook and saves the consistent index from the apply workflow.
-3. The etcd server finishes the apply workflow, saves the new changes, and saves the same value of the consistent index again.
-
-Between the second and third steps there is a very small window where the consistent index is increased without the corresponding WAL entry having been applied.
-
-## Trigger
-
-If etcd crashed after the consistent index was saved but before the apply workflow finished, the result was data inconsistency.
-When recovering the data, etcd would skip executing the changes from the failed apply workflow, assuming they had already been executed.
-
-This matches the issue reports and the code used to reproduce the issue, where the trigger was etcd crashing under high request load.
-etcd v3.5.0 was released with a bug (https://github.com/etcd-io/etcd/pull/13505) that could cause etcd to crash; it was fixed in v3.5.1.
-Apart from that, all reports described etcd running under high memory pressure, causing it to go out of memory from time to time.
-The reproduction ran etcd under high stress and randomly killed one of the members using the SIGKILL signal (immediate, unrecoverable process death).
-
-## Detection
-
-For a single-member cluster it is completely undetectable.
-There is no mechanism or tool for verifying that the state database matches the WAL.
-
-In a cluster with multiple members, the member that crashed will be missing the changes from the failed apply workflow.
-This means it will have a different database state and will return a different hash via the `HashKV` gRPC call.
-
-There is an automatic mechanism to detect data inconsistency.
-It can be executed during etcd start via `--experimental-initial-corrupt-check` and periodically via `--experimental-corrupt-check-time`.
-Both checks, however, have a flaw: they depend on the `HashKV` gRPC method, which might fail, causing the check to pass.
-
-In a multi-member etcd cluster, each member can run at a different speed and be at a different stage of applying the WAL.
-Comparing database hashes between multiple etcd members requires all hashes to be calculated at the same revision.
-This is done by requesting the hash for the same `revision` (version of the key-value store).
-However, it will not work if the provided revision is not available on the members.
-This can happen on very slow members, or in cases where corruption has led revision numbers to diverge.
-
-This means that for this issue, the corrupt check is only reliable during etcd start, just after etcd crashes.
-
-## Impact
-
-We are not aware of any cases of users reporting data corruption in a production environment.
-
-However, the issue was critical enough to motivate a public statement.
-The main impact comes from losing user trust in etcd's reliability.
-
-## Lessons learned
-
-### What went well
-
-* Multiple maintainers were able to work effectively on reproducing and fixing the issue. As they are in different timezones, there was always someone working on the issue.
-* When fixing the main data inconsistency we have found multiple other edge cases that could lead to data corruption (https://github.com/etcd-io/etcd/issues/13514, https://github.com/etcd-io/etcd/issues/13922, https://github.com/etcd-io/etcd/issues/13937).
-
-### What went wrong
-
-* No users enable data corruption detection, as it is still an experimental feature introduced in v3.3. All reported cases were detected manually, making the issue almost impossible to reproduce.
-* etcd has functional tests designed to detect such problems; however, they are unmaintained, flaky, and missing crucial scenarios.
-* The etcd v3.5 release was not qualified as comprehensively as previous ones. Older maintainers ran a manual qualification process that is no longer known or executed.
-* The etcd apply code is so complicated that fixing the data inconsistency took almost 2 weeks and multiple tries. The fix had to be so complicated that we needed to develop automatic validation for it (https://github.com/etcd-io/etcd/pull/13885).
-* etcd v3.5 was recommended for production without enough insight into production adoption. The production-ready recommendation was made after limited internal feedback, in the hope of attracting diverse usage, but users held back, waiting for someone else to discover issues first.
-
-### Where we got lucky
-
-* We reproduced the issue using the etcd functional tests only because of an unusual partition setup on a workstation. The functional tests store etcd data under `/tmp`, which is usually mounted as an in-memory filesystem. The problem was reproduced only because one of the maintainers had `/tmp` mounted on a standard disk.
-
-## Action items
-
-Action items should directly address items listed in lessons learned.
-We should double down on things that went well, fix things that went wrong, and stop depending on luck.
-
-Actions fall under three types, and we should have at least one item per type. Types:
-* Prevent - Prevent similar issues from occurring. In this case, what testing we should introduce to find data inconsistency issues before release, preventing publishing broken release.
-* Detect - Be more effective in detecting when similar issues occur. In this case, improve mechanism to detect data inconsistency issue so users will be automatically informed.
-* Mitigate - Reduce time to recovery for users. In this case, how we ensure that users are able to quickly fix data inconsistency.
-
-Actions should not be restricted to fixing the immediate issues; they should also propose long-term strategic improvements.
-To reflect this, action items have an assigned priority:
-* P0 - Critical for the reliability of the v3.5 release. Should be prioritized over all other work and backported to v3.5.
-* P1 - Important for long term success of the project. Blocks v3.6 release.
-* P2 - Stretch goals that would be nice to have for v3.6, however should not be blocking.
-
-| Action Item | Type | Priority | Bug |
-|-------------------------------------------------------------------------------------|----------|----------|----------------------------------------------|
-| etcd testing can reproduce historical data inconsistency issues | Prevent | P0 | https://github.com/etcd-io/etcd/issues/14045 |
-| etcd detects data corruption by default | Detect | P0 | https://github.com/etcd-io/etcd/issues/14039 |
-| etcd testing is high quality, easy to maintain and expand | Prevent | P1 | https://github.com/etcd-io/etcd/issues/13637 |
-| etcd apply code should be easy to understand and validate correctness | Prevent | P1 | |
-| Critical etcd features are not abandoned when contributors move on | Prevent | P1 | https://github.com/etcd-io/etcd/issues/13775 |
-| etcd is continuously qualified with failure injection | Prevent | P1 | |
-| etcd can reliably detect data corruption (hash is linearizable) | Detect | P1 | |
-| etcd checks consistency of snapshots sent between leader and followers | Detect | P1 | https://github.com/etcd-io/etcd/issues/13973 |
-| etcd recovery from data inconsistency procedures are documented and tested | Mitigate | P1 | |
-| etcd can immediately detect and recover from data corruption (implement Merkle root) | Mitigate | P2 | https://github.com/etcd-io/etcd/issues/13839 |
-
-## Timeline
-
-| Date | Event |
-|------------|-----------------------------------------------------------------------------------------------------------------------|
-| 2021-05-08 | Pull request that caused data corruption was merged - https://github.com/etcd-io/etcd/pull/12855 |
-| 2021-06-16 | Release v3.5.0 with data corruption was published - https://github.com/etcd-io/etcd/releases/tag/v3.5.0 |
-| 2021-12-01 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13514 |
-| 2022-01-28 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13654 |
-| 2022-03-08 | Report of data corruption - https://github.com/etcd-io/etcd/issues/13766 |
-| 2022-03-25 | Corruption confirmed by one of the maintainers - https://github.com/etcd-io/etcd/issues/13766#issuecomment-1078897588 |
-| 2022-03-29 | Statement about the corruption was sent to etcd-dev@googlegroups.com and dev@kubernetes.io |
-| 2022-04-24 | Release v3.5.3 with fix was published - https://github.com/etcd-io/etcd/releases/tag/v3.5.3 |
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
deleted file mode 100644
index 8b7cad3b5aa..00000000000
--- a/GOVERNANCE.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# etcd Governance
-
-## Principles
-
-The etcd community adheres to the following principles:
-
-- Open: etcd is open source.
-- Welcoming and respectful: See [Code of Conduct](code-of-conduct.md).
-- Transparent and accessible: Changes to the etcd code repository and CNCF related
-activities (e.g. level, involvement, etc) are done in public.
-- Merit: Ideas and contributions are accepted according to their technical merit for
-the betterment of the project. For specific guidance on practical contribution steps
-please see [CONTRIBUTING](./CONTRIBUTING.md) guide.
-
-## Maintainers
-
-Maintainers are first and foremost contributors that have shown they
-are committed to the long term success of a project. Maintainership is about building
-trust with the current maintainers of the project and being a person that they can
-depend on to make decisions in the best interest of the project in a consistent manner.
-The maintainers role can be a top-level or restricted to certain package/feature
-depending upon their commitment in fulfilling the expected responsibilities as explained
-below.
-
-### Top-level maintainer
-
-- Running the etcd release processes
-- Ownership of test and debug infrastructure
-- Triage GitHub issues to keep the issue count low (goal: under 100)
-- Regularly review GitHub pull requests across all pkgs
-- Providing cross pkg design review
-- Monitor email aliases
-- Participate when called upon in the [security disclosure and release process](security/README.md)
-- General project maintenance
-
-### Package/feature maintainer
-
-- Ownership of test and debug failures in a pkg/feature
-- Resolution of bugs triaged to a package/feature
-- Regularly review pull requests to the pkg subsystem
-
-### Nomination and retiring of maintainers
-
-[Maintainers](./MAINTAINERS) file on the `main` branch reflects the latest
-state of project maintainers. List of existing maintainers should be kept up to
-date by existing maintainers to properly reflect community health and to gain
-better understanding of recruiting need for new maintainers. Changes to list of
-maintainers should be done by opening a pull request and CCing all the existing
-maintainers.
-
-Contributors who are interested in becoming a maintainer, if performing relevant
-responsibilities, should discuss their interest with the existing maintainers.
-New maintainers must be nominated by an existing maintainer and must be elected
-by a supermajority of maintainers with a fallback on lazy consensus after three
-business weeks inactive voting period and as long as two maintainers are on board.
-
-Life priorities, interests, and passions can change. Maintainers can retire and
-move to the [emeritus status](./README.md#etcd-emeritus-maintainers). If a
-maintainer needs to step down, they should inform the other maintainers and, if possible,
-help find someone to pick up the related work. At the very least, they should ensure the
-related work can be continued. Afterward they can remove themselves from the list of
-existing maintainers.
-
-If a maintainer has not been performing their duties for a period of 12 months,
-they can be removed by the other maintainers. In that case the inactive maintainer will
-first be notified via email. If the situation doesn't improve, they will be
-removed. If an emeritus maintainer wants to regain an active role, they can do
-so by renewing their contributions. Active maintainers should welcome such a move.
-Retiring of other maintainers or regaining the status should require approval
-of at least two active maintainers.
-
-## Reviewers
-
-[Reviewers](./MAINTAINERS) are contributors who have demonstrated greater skill in
-reviewing the code contribution from other contributors. Their LGTM counts towards
-merging a code change into the project. A reviewer is generally on the ladder towards
-maintainership. New reviewers must be nominated by an existing maintainer and must be
-elected by a supermajority of maintainers with a fallback on lazy consensus after three
-business weeks inactive voting period and as long as two maintainers are on board.
-Reviewers can be removed by a supermajority of the maintainers or can resign by notifying
-the maintainers.
-
-## Decision making process
-
-Decisions are built on consensus between maintainers publicly. Proposals and ideas
-can either be submitted for agreement via a GitHub issue or PR, or by sending an email
-to `etcd-maintainers@googlegroups.com`.
-
-## Conflict resolution
-
-In general, we prefer that technical issues and maintainer membership are amicably
-worked out between the persons involved. However, for any technical dispute that has
-reached an impasse with a subset of the community, any contributor may open a GitHub
-issue or PR or send an email to `etcd-maintainers@googlegroups.com`. If the
-maintainers themselves cannot decide an issue, the issue will be resolved by a
-supermajority of the maintainers with a fallback on lazy consensus after three business
-weeks inactive voting period and as long as two maintainers are on board.
-
-## Changes in Governance
-
-Changes in project governance could be initiated by opening a GitHub PR.
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/MAINTAINERS b/MAINTAINERS
deleted file mode 100644
index 253c562fda6..00000000000
--- a/MAINTAINERS
+++ /dev/null
@@ -1,20 +0,0 @@
-# The official list of maintainers and reviewers for the project maintenance.
-#
-# Refer to the GOVERNANCE.md for description of the roles.
-#
-# Names should be added to this file like so:
-# Individual's name (@GITHUB_HANDLE) pkg:*
-# Individual's name (@GITHUB_HANDLE) pkg:*
-#
-# Please keep the list sorted.
-
-# MAINTAINERS
-Benjamin Wang (@ahrtr) pkg:*
-Hitoshi Mitake (@mitake) pkg:*
-Marek Siarkowicz (@serathius) pkg:*
-Piotr Tabor (@ptabor) pkg:*
-Sahdev Zala (@spzala) pkg:*
-Sam Batschelet (@hexfusion) pkg:*
-Tobias Grieger (@tbg) pkg:go.etcd.io/etcd/raft
-
-# REVIEWERS
diff --git a/Makefile b/Makefile
deleted file mode 100644
index c9d0ac44247..00000000000
--- a/Makefile
+++ /dev/null
@@ -1,172 +0,0 @@
-.PHONY: build
-build:
- GO_BUILD_FLAGS="${GO_BUILD_FLAGS} -v" ./scripts/build.sh
- ./bin/etcd --version
- ./bin/etcdctl version
- ./bin/etcdutl version
-
-.PHONY: tools
-tools:
- GO_BUILD_FLAGS="${GO_BUILD_FLAGS} -v" ./scripts/build_tools.sh
-
-# Tests
-
-GO_TEST_FLAGS?=
-
-.PHONY: test
-test:
- PASSES="unit integration release e2e" ./scripts/test.sh $(GO_TEST_FLAGS)
-
-.PHONY: test-unit
-test-unit:
- PASSES="unit" ./scripts/test.sh $(GO_TEST_FLAGS)
-
-.PHONY: test-integration
-test-integration:
- PASSES="integration" ./scripts/test.sh $(GO_TEST_FLAGS)
-
-.PHONY: test-e2e
-test-e2e: build
- PASSES="e2e" ./scripts/test.sh $(GO_TEST_FLAGS)
-
-.PHONY: test-e2e-release
-test-e2e-release: build
- PASSES="release e2e" ./scripts/test.sh $(GO_TEST_FLAGS)
-
-.PHONY: test-linearizability
-test-linearizability:
- PASSES="linearizability" ./scripts/test.sh $(GO_TEST_FLAGS)
-
-.PHONY: fuzz
-fuzz:
- ./scripts/fuzzing.sh
-
-# Static analysis
-
-verify: verify-gofmt verify-bom verify-lint verify-dep verify-shellcheck verify-goword \
- verify-govet verify-license-header verify-receiver-name verify-mod-tidy verify-shellcheck \
- verify-shellws verify-proto-annotations verify-genproto
-fix: fix-bom fix-lint
- ./scripts/fix.sh
-
-.PHONY: verify-gofmt
-verify-gofmt:
- PASSES="gofmt" ./scripts/test.sh
-
-.PHONY: verify-bom
-verify-bom:
- PASSES="bom" ./scripts/test.sh
-
-.PHONY: update-bom
-fix-bom:
- ./scripts/updatebom.sh
-
-.PHONY: verify-dep
-verify-dep:
- PASSES="dep" ./scripts/test.sh
-
-.PHONY: verify-lint
-verify-lint:
- golangci-lint run
-
-.PHONY: update-lint
-fix-lint:
- golangci-lint run --fix
-
-.PHONY: verify-shellcheck
-verify-shellcheck:
- PASSES="shellcheck" ./scripts/test.sh
-
-.PHONY: verify-goword
-verify-goword:
- PASSES="goword" ./scripts/test.sh
-
-.PHONY: verify-govet
-verify-govet:
- PASSES="govet" ./scripts/test.sh
-
-.PHONY: verify-license-header
-verify-license-header:
- PASSES="license_header" ./scripts/test.sh
-
-.PHONY: verify-receiver-name
-verify-receiver-name:
- PASSES="receiver_name" ./scripts/test.sh
-
-.PHONY: verify-mod-tidy
-verify-mod-tidy:
- PASSES="mod_tidy" ./scripts/test.sh
-
-.PHONY: verify-shellws
-verify-shellws:
- PASSES="shellws" ./scripts/test.sh
-
-.PHONY: verify-proto-annotations
-verify-proto-annotations:
- PASSES="proto_annotations" ./scripts/test.sh
-
-.PHONY: verify-genproto
-verify-genproto:
- PASSES="genproto" ./scripts/test.sh
-
-# Failpoints
-
-GOFAIL_VERSION = $(shell cd tools/mod && go list -m -f {{.Version}} go.etcd.io/gofail)
-
-.PHONY: gofail-enable
-gofail-enable: install-gofail
- gofail enable server/etcdserver/ server/storage/backend/ server/storage/mvcc/ server/storage/wal/
- cd ./server && go get go.etcd.io/gofail@${GOFAIL_VERSION}
- cd ./etcdutl && go get go.etcd.io/gofail@${GOFAIL_VERSION}
- cd ./etcdctl && go get go.etcd.io/gofail@${GOFAIL_VERSION}
- cd ./tests && go get go.etcd.io/gofail@${GOFAIL_VERSION}
-
-.PHONY: gofail-disable
-gofail-disable: install-gofail
- gofail disable server/etcdserver/ server/storage/backend/ server/storage/mvcc/ server/storage/wal/
- cd ./server && go mod tidy
- cd ./etcdutl && go mod tidy
- cd ./etcdctl && go mod tidy
- cd ./tests && go mod tidy
-
-.PHONY: install-gofail
-install-gofail:
- cd tools/mod; go install go.etcd.io/gofail@${GOFAIL_VERSION}
-
-build-failpoints-release-3.5:
- rm -rf /tmp/etcd-release-3.5/
- mkdir -p /tmp/etcd-release-3.5/
- cd /tmp/etcd-release-3.5/; \
- git clone --depth 1 --branch release-3.5 https://github.com/etcd-io/etcd.git .; \
- go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
- (cd server; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
- (cd etcdctl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
- (cd etcdutl; go get go.etcd.io/gofail@${GOFAIL_VERSION}); \
- FAILPOINTS=true ./build;
- mkdir -p ./bin
- cp /tmp/etcd-release-3.5/bin/etcd ./bin/etcd
-
-build-failpoints-release-3.4:
- rm -rf /tmp/etcd-release-3.4/
- mkdir -p /tmp/etcd-release-3.4/
- cd /tmp/etcd-release-3.4/; \
- git clone --depth 1 --branch release-3.4 https://github.com/etcd-io/etcd.git .; \
- go get go.etcd.io/gofail@${GOFAIL_VERSION}; \
- FAILPOINTS=true ./build;
- mkdir -p ./bin
- cp /tmp/etcd-release-3.4/bin/etcd ./bin/etcd
-
-# Cleanup
-
-clean:
- rm -f ./codecov
- rm -rf ./covdir
- rm -f ./bin/Dockerfile-release*
- rm -rf ./bin/etcd*
- rm -rf ./default.etcd
- rm -rf ./tests/e2e/default.etcd
- rm -rf ./release
- rm -rf ./coverage/*.err ./coverage/*.out
- rm -rf ./tests/e2e/default.proxy
- rm -rf ./bin/shellcheck*
- find ./ -name "127.0.0.1:*" -o -name "localhost:*" -o -name "*.log" -o -name "agent-*" -o -name "*.coverprofile" -o -name "testname-proxy-*" -delete
diff --git a/Procfile b/Procfile
index 92ef3763958..bf4c502506d 100644
--- a/Procfile
+++ b/Procfile
@@ -1,9 +1,7 @@
-# Use goreman to run `go install github.com/mattn/goreman@latest`
+# Use goreman to run `go get github.com/mattn/goreman`
# Change the path of bin/etcd if etcd is located elsewhere
-
-etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr
-etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr
-etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr
-#proxy: bin/etcd grpc-proxy start --endpoints=127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 --listen-addr=127.0.0.1:23790 --advertise-client-url=127.0.0.1:23790 --enable-pprof
-
-# A learner node can be started using Procfile.learner
+etcd1: go run ./etcd_backend/main.go --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd2: go run ./etcd_backend/main.go --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+etcd3: go run ./etcd_backend/main.go --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
+# in future, use proxy to listen on 2379
+#proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2378 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof
diff --git a/Procfile.learner b/Procfile.learner
deleted file mode 100644
index 1517d3f2be8..00000000000
--- a/Procfile.learner
+++ /dev/null
@@ -1,12 +0,0 @@
-# Use goreman to run `go install github.com/mattn/goreman@latest`
-
-# 1. Start the cluster using Procfile
-# 2. Add learner node to the cluster
-# % etcdctl member add infra4 --peer-urls="http://127.0.0.1:42380" --learner=true
-
-# 3. Start learner node with goreman
-# Change the path of bin/etcd if etcd is located elsewhere
-etcd4: bin/etcd --name infra4 --listen-client-urls http://127.0.0.1:42379 --advertise-client-urls http://127.0.0.1:42379 --listen-peer-urls http://127.0.0.1:42380 --initial-advertise-peer-urls http://127.0.0.1:42380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra4=http://127.0.0.1:42380,infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state existing --enable-pprof --logger=zap --log-outputs=stderr
-
-# 4. The learner node can be promoted to voting member by the command
-# % etcdctl member promote
diff --git a/Procfile.v2 b/Procfile.v2
deleted file mode 100644
index c68511e56d4..00000000000
--- a/Procfile.v2
+++ /dev/null
@@ -1,7 +0,0 @@
-# Use goreman to run `go install github.com/mattn/goreman@latest`
-# Change the path of bin/etcd if etcd is located elsewhere
-etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-# in future, use proxy to listen on 2379
-#proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2378 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof
diff --git a/README.md b/README.md
index 3169e49bcc9..4aee0f16c1c 100644
--- a/README.md
+++ b/README.md
@@ -1,192 +1,675 @@
# etcd
-[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/etcd?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/etcd)
-[![Coverage](https://codecov.io/gh/etcd-io/etcd/branch/main/graph/badge.svg)](https://codecov.io/gh/etcd-io/etcd)
-[![Tests](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml)
-[![codeql-analysis](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml)
-[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)
-[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/etcd)
-[![Releases](https://img.shields.io/github/release/etcd-io/etcd/all.svg?style=flat-square)](https://github.com/etcd-io/etcd/releases)
-[![LICENSE](https://img.shields.io/github/license/etcd-io/etcd.svg?style=flat-square)](https://github.com/etcd-io/etcd/blob/main/LICENSE)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/etcd-io/etcd/badge)](https://api.securityscorecards.dev/projects/github.com/etcd-io/etcd)
+v3.5.2: etcd is a reliable distributed key-value store for the most critical data of a distributed system.
-**Note**: The `main` branch may be in an *unstable or even broken state* during development. For stable versions, see [releases][github-release].
+Personal source-code reading notes. Port 2379 serves client communication; port 2380 serves server-to-server communication.
+![etcd ](./images/raft.png)
-![etcd Logo](logos/etcd-horizontal-color.svg)
+### Configuration
-etcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being:
+```
+peer-cert-allowed-cn  Allowed CommonName for client certificates (your name or your server's hostname)
+```
-* *Simple*: well-defined, user-facing API (gRPC)
-* *Secure*: automatic TLS with optional client cert authentication
-* *Fast*: benchmarked 10,000 writes/sec
-* *Reliable*: properly distributed using Raft
+## Reclaiming storage space
-etcd is written in Go and uses the [Raft][] consensus algorithm to manage a highly-available replicated log.
+```bash
+# Set the etcd storage quota (simulation)
+etcd --quota-backend-bytes=$((16*1024*1024))
-etcd is used [in production by many companies](./ADOPTERS.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [locksmith][], [vulcand][], [Doorman][], and many others. Reliability is further ensured by [**rigorous testing**](https://github.com/etcd-io/etcd/tree/main/tests/functional).
+# Fill the keyspace until the quota is exceeded
+while [ 1 ]; do dd if=/dev/urandom bs=1024 count=1024 | etcdctl put key || break;done
-See [etcdctl][etcdctl] for a simple command line client.
+# Check endpoint status
+etcdctl --write-out=table endpoint status
-[raft]: https://raft.github.io/
-[k8s]: http://kubernetes.io/
-[doorman]: https://github.com/youtube/doorman
-[locksmith]: https://github.com/coreos/locksmith
-[vulcand]: https://github.com/vulcand/vulcand
-[etcdctl]: https://github.com/etcd-io/etcd/tree/main/etcdctl
+# List active alarms
+etcdctl alarm list
-## Maintainers
+# Defragment
+etcdctl defrag
-[MAINTAINERS](MAINTAINERS) strive to shape an inclusive open source project culture where users are heard and contributors feel respected and empowered. MAINTAINERS maintain productive relationships across different companies and disciplines. Read more about [MAINTAINERS role and responsibilities](GOVERNANCE.md#maintainers).
+# Disarm alarms
+etcdctl alarm disarm
-## Getting started
+# Get the current revision of the etcd keyspace
+rev=$(etcdctl -w json endpoint status | egrep -o -i '"revision":[0-9]*' | egrep -o '[0-9]*')
-### Getting etcd
+# Get the current revision again
+$ rev=$(etcdctl endpoint status --write-out="json" | egrep -o -i '"revision":[0-9]*' | egrep -o '[0-9].*')
+# Compact away old revisions, compacting up to the current revision
+etcdctl compact $rev
+# Defragment to reclaim disk space
+etcdctl defrag
+# Disarm the alarm
+etcdctl alarm disarm
+# Take a snapshot backup and inspect it
+etcdctl snapshot save backup.db
+etcdctl snapshot status backup.db
+```
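+
+The same flow can be driven from Go with the official client. A minimal sketch, assuming a local single-member cluster on 127.0.0.1:2379 and the go.etcd.io/etcd/client/v3 package; `rev` mirrors the shell example above:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func main() {
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{"127.0.0.1:2379"},
+		DialTimeout: 5 * time.Second,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer cli.Close()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	// Read the current revision from the endpoint status header.
+	status, err := cli.Status(ctx, "127.0.0.1:2379")
+	if err != nil {
+		log.Fatal(err)
+	}
+	rev := status.Header.Revision
+
+	// Compact history up to the current revision, then defragment and disarm alarms.
+	if _, err := cli.Compact(ctx, rev); err != nil {
+		log.Fatal(err)
+	}
+	if _, err := cli.Defragment(ctx, "127.0.0.1:2379"); err != nil {
+		log.Fatal(err)
+	}
+	if _, err := cli.AlarmDisarm(ctx, &clientv3.AlarmMember{}); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("compacted up to revision", rev)
+}
+```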
-The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, and Docker on the [release page][github-release].
+```
+//--auto-compaction-mode=revision --auto-compaction-retention=1000  every 5 minutes, automatically compact to "latest revision" - 1000
+//--auto-compaction-mode=periodic --auto-compaction-retention=12h   compact every hour, keeping a 12-hour window
+// See etcd_backend/embed/config_test.go:TestAutoCompactionModeParse
-For more installation guides, please check out [play.etcd.io](http://play.etcd.io) and [operating etcd](https://etcd.io/docs/latest/op-guide).
+- Keep only one hour of history: `etcd --auto-compaction-retention=1`
+- Compact the history up to revision 3 (older revisions are dropped): `etcdctl compact 3`
+- Defragment: `etcdctl defrag`
+```
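+
+The same auto-compaction knobs can also be set programmatically when embedding etcd. A minimal sketch, assuming the go.etcd.io/etcd/server/v3/embed package (field names as in upstream v3.5):
+
+```go
+package main
+
+import (
+	"log"
+
+	"go.etcd.io/etcd/server/v3/embed"
+)
+
+func main() {
+	cfg := embed.NewConfig()
+	cfg.Dir = "default.etcd"
+	// Periodic mode: compact every retention window, keeping 1h of history.
+	cfg.AutoCompactionMode = "periodic"
+	cfg.AutoCompactionRetention = "1h"
+	// Alternative: cfg.AutoCompactionMode = "revision"; cfg.AutoCompactionRetention = "1000"
+	cfg.QuotaBackendBytes = 16 * 1024 * 1024 // same quota as the simulation above
+
+	e, err := embed.StartEtcd(cfg)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer e.Close()
+	<-e.Server.ReadyNotify()
+	log.Println("embedded etcd is ready")
+}
+```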
-[github-release]: https://github.com/etcd-io/etcd/releases
-[branch-management]: https://etcd.io/docs/latest/branch_management
+### URL
-### Running etcd
+```
+http://127.0.0.1:2379/members
-First start a single-member cluster of etcd.
+```
-If etcd is installed using the [pre-built release binaries][github-release], run it from the installation location as below:
+### msgType
+
+| Message type | Handled by | Description |
+| :--- | :--- | :--- |
+| MsgHup | any role | local: start an election; triggers vote or pre-vote |
+| MsgBeat | Leader | local: heartbeat tick; sends MsgHeartbeat to peers |
+| MsgProp | Leader, Candidate, Follower | local: Propose, turned into MsgApp |
+| MsgApp | Candidate, Follower | remote: log operations (replication / config-change request) |
+| MsgAppResp | Leader | remote: log replication response |
+| MsgVote | any role | remote: vote request |
+| MsgVoteResp | Candidate | remote: vote response |
+| MsgPreVote | any role | remote: pre-vote request |
+| MsgPreVoteResp | Candidate | remote: pre-vote response |
+| MsgSnap | Candidate, Follower | remote: the leader copies a snapshot to a follower; the response is MsgAppResp, telling the leader to keep replicating the entries that follow |
+| MsgHeartbeat | Candidate, Follower | |
+| MsgHeartbeatResp | Leader | |
+| MsgUnreachable | Leader | remote: etcdserver tells the raft state machine that a follower is unreachable, switching sends from pipeline to ping-pong mode |
+| MsgSnapStatus | Leader | remote: etcdserver tells the raft state machine whether the snapshot transfer succeeded or failed |
+| MsgCheckQuorum | Leader | |
+| MsgTransferLeader | Leader, Follower | remote |
+| MsgTimeoutNow | Candidate, Follower | remote |
+| MsgReadIndex | Leader, Follower | remote |
+| MsgReadIndexResp | Follower | remote |
+
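+A minimal sketch (not etcd's actual step functions) of how a handler might dispatch on these message types, using go.etcd.io/etcd/raft/v3/raftpb:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.etcd.io/etcd/raft/v3/raftpb"
+)
+
+// stepSketch mimics the shape of raft's step functions: local messages
+// (MsgHup, MsgBeat, MsgProp) originate on this node; everything else
+// arrives from peers over the network.
+func stepSketch(m raftpb.Message) {
+	switch m.Type {
+	case raftpb.MsgHup:
+		fmt.Println("local: start an election (vote or pre-vote)")
+	case raftpb.MsgBeat:
+		fmt.Println("local: leader tick, broadcast MsgHeartbeat to peers")
+	case raftpb.MsgProp:
+		fmt.Println("local: proposal, replicated to followers as MsgApp")
+	case raftpb.MsgApp, raftpb.MsgSnap:
+		fmt.Println("remote: log replication or snapshot from the leader")
+	case raftpb.MsgVote, raftpb.MsgPreVote:
+		fmt.Println("remote: (pre-)vote request from a candidate")
+	default:
+		fmt.Println("other message type:", m.Type)
+	}
+}
+
+func main() {
+	stepSketch(raftpb.Message{Type: raftpb.MsgHup})
+	stepSketch(raftpb.Message{Type: raftpb.MsgApp})
+}
+```
+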
+### issue
+
+- 1. The difference between CertFile/ClientCertFile and KeyFile/ClientKeyFile
+ ```
+  In practice they are configured to the same files.
+  Normally client and server certificates are issued by the same CA, so the server can verify client certificates with its own key and certificate.
+  But if they are not issued by the same CA, the server needs a certificate file and key issued by the same CA as the client's.
+
+ ```
+- 2. URLs
+ ```
+
+ ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
+ ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined")
+
+  --data-dir specifies the node's data directory: node ID, cluster ID, initial cluster configuration, and snapshot files; if --wal-dir is not set, WAL files are stored here too.
+  --wal-dir specifies the node's WAL directory; when set, WAL files are stored separately from the other data files.
+ # member
+  The address the etcd server itself listens on (i.e. which local interface and port)
+ --listen-client-urls DefaultListenClientURLs = "http://192.168.1.100:2379"
+  The address used for member-to-member communication; listens for the other etcd members
+ --listen-peer-urls DefaultListenPeerURLs = "http://192.168.1.100:2380"
+
+ # cluster
+  The URLs that clients (etcdctl/curl, etc.) use when talking to the etcd service
+ --advertise-client-urls http://127.0.0.1:2379,http://192.168.1.100:2379,http://10.10.10.10:2379
+  This member's peer URLs, advertised to the rest of the cluster.
+  --initial-advertise-peer-urls http://127.0.0.1:12380 tells the other cluster members this node's peer URL.
+  # information about all the nodes in the cluster
+ --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380'
+
+
+  Request flow:
+ etcdctl endpoints=http://192.168.1.100:2379 --debug ls
+  First connect to the endpoints and fetch the URLs configured in advertise-client-urls,
+  then connect to each of those addresses in turn until the operation succeeds.
+
+
+ --advertise-client-urls=https://192.168.1.100:2379
+ --cert-file=/etc/kubernetes/pki/etcd/server.crt
+ --client-cert-auth=true
+
+ --initial-advertise-peer-urls=https://192.168.1.100:2380
+ --initial-cluster=k8s-master01=https://192.168.1.100:2380
+
+ --key-file=/etc/kubernetes/pki/etcd/server.key
+ --listen-client-urls=https://127.0.0.1:2379,https://192.168.1.100:2379
+ --listen-metrics-urls=http://127.0.0.1:2381
+ --listen-peer-urls=https://192.168.1.100:2380
+
+ --name=k8s-master01
+
+ --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
+ --peer-client-cert-auth=true
+ --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
+
+ --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
+ --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
+  initial-advertise-peer-urls must also be contained in initial-cluster
+
+ ```
+- 3. JournalLogOutput (logging)
+  ```
+  systemd-journal complements syslog: it collects messages from the kernel, the early boot stage, standard output, syslog, and errors from daemons during startup and runtime.
+  By default it writes logs to /run/log/journal, keeps only one month of logs, and they are lost after a reboot.
+  Once the /var/log/journal directory is created, it logs to that directory instead and keeps the logs permanently.
+ ```
+
+
+- checkquorum (quorum check):
+  ```
+  Periodically the leader tries to contact the nodes in the cluster (by sending heartbeats); if it finds it can reach fewer than a majority of them, it voluntarily steps down to follower.
+  This way, under a network partition the old leader quickly learns that it is stale.
+ ```
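+  Below is a minimal, illustrative Go sketch of this idea (not etcd's actual implementation): on an
+  election-timeout tick the leader counts the peers it has recently heard from and steps down without a quorum.
+  ```go
+  package main
+
+  import "fmt"
+
+  type peer struct{ recentlyActive bool }
+
+  // checkQuorum reports whether the leader plus its recently active peers
+  // still form a majority of the cluster.
+  func checkQuorum(peers []peer) bool {
+  	active := 1 // count the leader itself
+  	for _, p := range peers {
+  		if p.recentlyActive {
+  			active++
+  		}
+  	}
+  	return active > (len(peers)+1)/2
+  }
+
+  func main() {
+  	peers := []peer{{true}, {false}, {false}, {false}} // 5-node cluster, only 1 follower reachable
+  	if !checkQuorum(peers) {
+  		fmt.Println("lost quorum: step down to follower")
+  	}
+  }
+  ```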
+
+
+- PreVote optimization
+  ```
+  Before a follower starts an election it first asks the other nodes whether they would vote for it (that is,
+  whether they too have stopped hearing from the leader). Only when more than half respond does it start a real
+  election. This avoids the disruptive elections caused by a partitioned node rejoining with an inflated term.
+  ```
+- WAL
+  ```
+  WAL (Write Ahead Log) is the standard way databases persist data. Updating a record in place can be slow;
+  it is faster and safer to first append a log record (usually a single command) to a file and let a
+  background routine apply the actual update later.
+  ```
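+  A tiny illustrative sketch of the write-ahead idea (etcd's real WAL uses length-prefixed protobuf records
+  with CRCs, not JSON; all names here are made up):
+  ```go
+  package main
+
+  import (
+  	"encoding/json"
+  	"os"
+  )
+
+  type walRecord struct {
+  	Op    string `json:"op"`
+  	Key   string `json:"key"`
+  	Value string `json:"value"`
+  }
+
+  // appendRecord persists the intended change before it is applied in memory.
+  func appendRecord(f *os.File, rec walRecord) error {
+  	b, err := json.Marshal(rec)
+  	if err != nil {
+  		return err
+  	}
+  	if _, err := f.Write(append(b, '\n')); err != nil {
+  		return err
+  	}
+  	return f.Sync() // fsync the log entry before mutating the real state
+  }
+
+  func main() {
+  	f, _ := os.OpenFile("demo.wal", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
+  	defer f.Close()
+  	_ = appendRecord(f, walRecord{Op: "put", Key: "mykey", Value: "v1"})
+  }
+  ```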
+- When sending heartbeats, how does the leader set each follower's commit index?
+
+- After the leader receives a follower's heartbeat response, how does it update that follower's metadata?
+
+- Snapshots: when a follower lags far behind, the leader sends it a snapshot. Suppose the transfer is slow
+  because of the network, the leader meanwhile produces a newer snapshot, and the WAL no longer holds the old entries: the follower finishes syncing, rejects the leader's next entries, receives yet another snapshot, and so on. Can this loop forever? To revisit after reading the source.
+ ![](./images/MsgReadIndex.png)
+- Why does JointConfig hold two configurations?
+- In which scenarios can a follower's log conflict with the leader's?
+  ```
+  It can happen after a leader crash (the old leader may not have fully replicated all of its entries), and
+  repeated leader/follower crashes make it worse. A follower may be missing entries the new leader has, may
+  have extra entries the leader does not have, or both.
+  ```
+- How does a follower drop invalid log entries?
+  ```
+  The leader resolves inconsistencies by forcing followers to copy its own log, so conflicting entries on a
+  follower are overwritten by the leader's. The leader tracks each follower's replication progress in nextIndex;
+  if the follower's consistency check fails on an append, it rejects the request, the leader decrements
+  nextIndex and retries, until some position where follower and leader agree.
+  ```
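+  An illustrative sketch of the nextIndex backoff described above (not the raft library's actual code):
+  ```go
+  // onAppendRejected lowers the leader's nextIndex for a follower that rejected
+  // an append, so the next AppendEntries starts from an earlier position.
+  func onAppendRejected(nextIndex map[uint64]uint64, follower, rejectHint uint64) uint64 {
+  	ni := nextIndex[follower]
+  	if ni > 1 {
+  		ni-- // simple decrement-and-retry
+  	}
+  	if rejectHint+1 < ni {
+  		ni = rejectHint + 1 // jump to the follower's hint to converge faster
+  	}
+  	nextIndex[follower] = ni
+  	return ni
+  }
+  ```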
+- The WAL module is append-only, so how can it "delete" conflicting entries that were already persisted?
+  ```
+  etcd uses a small trick here. Conflicting entries are indeed never removed from the WAL file and you can
+  still find them there; but when etcd loads the WAL and finds several entries for the same raft log index,
+  it keeps the one written last, appending it to the raft log over the earlier one, which has the same effect
+  as deleting the conflicting entry.
+  https://github.com/etcd-io/etcd/issues/12589
+  ```
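+  A sketch of the replay behaviour this describes: the WAL file stays append-only, but while loading, an
+  entry whose index already exists overwrites the earlier suffix, so the last write for an index wins
+  (illustrative only):
+  ```go
+  type walEntry struct {
+  	Term  uint64
+  	Index uint64
+  }
+
+  // replay rebuilds the raft log from WAL records written in order.
+  func replay(records []walEntry) []walEntry {
+  	var log []walEntry
+  	for _, e := range records {
+  		if len(log) > 0 && e.Index >= log[0].Index && e.Index <= log[len(log)-1].Index {
+  			// an entry for this index already exists: drop the old suffix, the newer entry wins
+  			log = log[:e.Index-log[0].Index]
+  		}
+  		log = append(log, e)
+  	}
+  	return log
+  }
+  ```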
+
+(2) electionElapsed
+
+When electionElapsed times out, a MsgCheckQuorum is sent to the local node; on receiving it the node checks
+whether it can still maintain leadership and, if not, steps down to follower. If a leadership transfer is in progress (handing leadership to another node), an electionElapsed timeout also means the transfer has timed out and it is aborted.
-```bash
-/tmp/etcd-download-test/etcd
```
+curl --------http---------> gateway ------------> etcd grpc server 2379
+                           the gateway translates HTTP into gRPC
-The etcd command can be simply run as such if it is moved to the system path as below:
-```bash
-mv /tmp/etcd-download-test/etcd /usr/local/bin/
-etcd
+
+127.0.0.1:2379
+1. HTTP2
+2. HTTP1
+
```
-This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
+### module
-Next, let's set a single key, and then retrieve it:
+- github.com/soheilhy/cmux lets different protocols be served on the same listener (see the sketch below)
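+  A hedged sketch of how cmux can multiplex gRPC (HTTP/2) and plain HTTP on one port, the same idea etcd
+  uses for 2379 (endpoint and handlers here are placeholders):
+  ```go
+  package main
+
+  import (
+  	"net"
+  	"net/http"
+
+  	"github.com/soheilhy/cmux"
+  	"google.golang.org/grpc"
+  )
+
+  func main() {
+  	l, err := net.Listen("tcp", "127.0.0.1:2379")
+  	if err != nil {
+  		panic(err)
+  	}
+  	m := cmux.New(l)
+  	// gRPC connections carry the application/grpc content-type over HTTP/2
+  	grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
+  	httpL := m.Match(cmux.HTTP1Fast())
+
+  	go grpc.NewServer().Serve(grpcL)
+  	go http.Serve(httpL, http.NotFoundHandler())
+  	_ = m.Serve() // blocks, dispatching each connection by protocol
+  }
+  ```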
```
-etcdctl put mykey "this is awesome"
-etcdctl get mykey
+etcdServer handles Propose messages itself; all other messages are handed to raft.step (a function that changes with the node's role, and which checks term and index before dispatching on the message type).
+
+StartEtcd
+  1. etcdserver.NewServer ->
+       heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
+       MySelfStartRaft
+         newRaftNode
+           r.ticker = time.NewTicker(r.heartbeat)        create the heartbeat ticker
+         startNode ->
+           raft.StartNode ->
+             go n.run()
+               rd = n.rn.readyWithoutAccept()             collect pending output, including r.msgs
+               readyc = n.readyc                          channel carrying the pending Ready
+               - case pm := <-propc                       locally proposed messages (MsgProp)
+               - case m := <-n.recvc                      G  handle messages from peers (everything except Propose)
+               - case cc := <-n.confc
+               - case <-n.tickc                           F  consume a tick
+                   n.rn.Tick()
+                     rn.raft.tick()                       calls the tick function for the current role
+                       - r.tickElection
+                           r.Step(pb.Message{From: r.id, Type: pb.MsgHup})   Step is the entry point for every incoming message
+                             r.send(pb.Message
+                               r.msgs = append(r.msgs, m) queue the outgoing message
+                       - r.tickHeartbeat
+                           r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+               - case readyc <- rd                        A  publish the Ready data
+               - case <-advancec:
+               - case c := <-n.status:
+               - case <-n.stop:
+       tr.AddPeer
+         startPeer                                        open a connection to every peer
+           r.Process
+             s.Process
+               s.r.Step(ctx, m)
+                 n.step
+                   stepWithWaitOption
+                     case n.recvc <- m                    G  deliver messages received from peers
+
+  2. e.Server.Start ->
+       EtcdServer.Start ->
+         s.start()
+           go s.run()
+             --> | # s.r = raftNode
+             --> | s.r.start(rh)
+             --> |   go func()
+             --> |     - case <-r.ticker.C:               receive the ticker signal
+             --> |         r.tick()
+             --> |           r.Tick()
+             --> |             case n.tickc <- struct{}{} F  push a tick; the channel is buffered, so it does not block
+             --> |     - case rd := <-r.Ready()           A  consume the Ready data
+             --> |         case r.applyc <- ap            B  publish the entries to apply
+             --> |         r.transport.Send(msgs)         send outgoing messages to the peers
+             --> |     - case <-r.stopped:
+
+             - case ap := <-s.r.apply()                   B  consume the entries to apply
+                 read from applyc, wrap it as a job and hand it to the scheduler
+             - case leases := <-expiredLeaseC
+                 handle expired leases
+             - case err := <-s.errorc
+                 a runtime error: exit immediately
+             - getSyncC
+             - case <-s.stop:
+                 failure during startup
+
+  3. e.servePeers
+  4. e.serveClients
+  5. e.serveMetrics
+
```
-etcd is now running and serving client requests. For more, please check out:
+![iShot2021-07-15 23.46.37](./images/unstable_index.png)
-- [Interactive etcd playground](http://play.etcd.io)
-- [Animated quick demo](https://etcd.io/docs/latest/demo)
+```
+How snapshot, storage and unstable differ
+compacted <--- compacted <--- applied <--- committed <--- stable <--- unstable
+WAL log
+11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46
+-------------------------------------------------------------------|--MemoryStorage|file--|-----------------
+-----compacted----|---------------compacted------------------------|-------------------------------------------------
+----snapshot------------- |----------------snapshot----------------- | storage: persisted | unstable: in memory
+----snapshot------------- |----------------snapshot----------------- | | before being persisted, an entry here may be overwritten by a new entry with the same index after a leadership change
-### etcd TCP ports
-The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
+Every log Entry goes through five stages: unstable, stable, committed, applied, compacted. The state transitions are summarized below:
-[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
+A freshly received entry is stored in unstable. Before it is persisted, a leadership change may overwrite it with a new entry carrying the same index; the handling can be found in raftLog.maybeAppend() and unstable.truncateAndAppend().
+Entries in unstable are written to persistent storage (files) by the caller; once persisted they move from unstable into MemoryStorage.
+You may object that MemoryStorage is not persistent storage. In fact the log is written twice, once to the file and once to MemoryStorage, and the raft package only reads the MemoryStorage copy. The point of this design is to use memory as a cache in front of the file, which is much faster when the log is accessed frequently.
+Note that an entry being in MemoryStorage only means it is durable; it says nothing about commit or apply.
+The leader collects every peer's replication state. Once an entry has been accepted by more than half of the peers it is committed; peers update their own committed index from the leader's messages, so every entry up to that index can be committed locally.
+Committed entries are handed to the application and applied one by one, changing the application's state.
+An applied entry means the application has persisted the resulting state itself, so the entry can be dropped; otherwise the ever-growing log would use unbounded storage. Remember all entries also live in MemoryStorage, and keeping applied entries there wastes memory, which is what compaction (compacted) is for.
-### Running a local etcd cluster
+Every time the user proposes an entry it is saved both in MemoryStorage and in the WAL. Whenever raft hands a batch of committed entries to the upper layer, maybeTriggerSnapshot runs after they are applied
+to decide whether MemoryStorage should take a snapshot; once a snapshot is taken, records more than 10000 entries before the snapshot point are removed from MemoryStorage.ents (commonly called compaction).
-First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
-Our [Procfile script](./Procfile) will set up a local example cluster. Start it with:
-```bash
-goreman start
```
-This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and optionally etcd `grpc-proxy`, which runs locally and composes a cluster.
+## Compact
+
+```
+1. After a new snapshot is created, MemoryStorage.Compact() is usually called to discard every entry in
+MemoryStorage.ents before the given index, shrinking MemoryStorage.ents (a form of GC):
+func (ms *MemoryStorage) Compact(compactIndex uint64)
+
+2. Compaction also removes old revisions from the kvindex and the historical data in bolt.db.
-Every cluster member and proxy accepts key value reads and key value writes.
-Follow the steps in [Procfile.learner](./Procfile.learner) to add a learner node to the cluster. Start the learner node with:
-```bash
-goreman -f ./Procfile.learner start
```
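+A rough sketch of what MemoryStorage.Compact does conceptually (not the library's exact code; it assumes
+firstIndex <= compactIndex <= lastIndex):
+```go
+type raftEntry struct {
+	Index uint64
+	Term  uint64
+}
+
+type memStorage struct {
+	// ents[0] is a dummy entry recording the index/term compaction stopped at
+	ents []raftEntry
+}
+
+// compact drops every entry before compactIndex so ents stays bounded.
+func (ms *memStorage) compact(compactIndex uint64) {
+	first := ms.ents[0].Index
+	if compactIndex <= first {
+		return // already compacted at least this far
+	}
+	offset := compactIndex - first
+	kept := make([]raftEntry, 1, uint64(len(ms.ents))-offset+1)
+	kept[0] = raftEntry{Index: compactIndex, Term: ms.ents[offset].Term}
+	kept = append(kept, ms.ents[offset+1:]...)
+	ms.ents = kept
+}
+```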
-### Install etcd client v3
+### WAL log record types
-```bash
-go get go.etcd.io/etcd/client/v3
+```
+type Record struct {
+ Type int64
+ Crc uint32
+ Data []byte
+}
+- metadataType :1  metadata record; stores the current node id and cluster id.
+ type Metadata struct {
+ NodeID uint64
+ ClusterID uint64
+ }
+- entryType :2  a raft log entry
+ type Entry struct {
+ Term uint64
+ Index uint64
+ Type EntryType
+ EntryNormal
+ # msgType
+ EntryConfChange
+ EntryConfChangeV2
+ Data []byte
+      ^ Data carries the message payload (see the msgType list above)
+ }
+- stateType :3  the current Term, the current vote, and the current commit index.
+- crcType :4  holds the CRC checkpoint; Data is nil
+- snapshotType :5  the Index and Term the snapshot covers
+ type Snapshot struct {
+ Index uint64
+ Term uint64
+ ConfState *raftpb.ConfState
+ }
```
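+An illustrative Go sketch of consuming WAL records of the types above (the real decoder reads
+length-prefixed protobuf and chains the CRC across records; this only shows the check and the dispatch):
+```go
+package main
+
+import (
+	"fmt"
+	"hash/crc32"
+)
+
+type record struct {
+	Type int64
+	Crc  uint32
+	Data []byte
+}
+
+func handle(rec record) error {
+	if crc32.ChecksumIEEE(rec.Data) != rec.Crc { // simplified CRC check
+		return fmt.Errorf("wal: crc mismatch for record type %d", rec.Type)
+	}
+	switch rec.Type {
+	case 1: // metadataType: node id and cluster id
+	case 2: // entryType: a raft log entry
+	case 3: // stateType: term, vote and commit index
+	case 4: // crcType: checkpoint of the rolling crc, Data is nil
+	case 5: // snapshotType: the index/term a snapshot covers
+	}
+	return nil
+}
+
+func main() {
+	data := []byte("example entry")
+	fmt.Println(handle(record{Type: 2, Crc: crc32.ChecksumIEEE(data), Data: data}))
+}
+```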
-### Next steps
+```
+Data that raft has committed and wants applied is wrapped in the Ready struct (<-r.Ready()).
+  raftNode takes the Ready, does some bookkeeping and filters out the normal log entries (publishEntries);
+  the upper layer takes the filtered entries and applies them to kvstore.
+```
-Now it's time to dig into the full etcd API and other guides.
+### Cluster membership changes
-- Read the full [documentation][].
-- Explore the full gRPC [API][].
-- Set up a [multi-machine cluster][clustering].
-- Learn the [config format, env variables and flags][configuration].
-- Find [language bindings and tools][integrations].
-- Use TLS to [secure an etcd cluster][security].
-- [Tune etcd][tuning].
+```
+1. first check whether there is a pending config change to apply
+2. put the change into raft unstable, waiting to be sent -----> sent, then wait for apply
+3. apply the change
+case rd := <-r.Ready():                take the messages to apply from raft
+  case r.applyc <- ap:
+  go:
+    - ap := <-s.r.apply()
+    - s.applyAll(&ep, &ap)
+    - s.applyEntries(ep, apply)
+    - s.apply(ents, &ep.confState)
+    - case raftpb.EntryConfChange:
+    - s.applyConfChange(cc, confState, shouldApplyV3)
+      - *s.r.ApplyConfChange(cc)        get the cluster state after the config change is applied
+        - cs := r.applyConfChange(cc)   returns the post-change cluster state; already in effect, only quorum.JointConfig and the peer info are updated
+        - r.switchToConfig(cfg, prs)
+      -
+        | s.cluster.PromoteMember
+        | s.cluster.AddMember       -----> update v2store [in-memory node tree] and backend [bolt.db]
+        | s.cluster.RemoveMember    |----> triggers the watchers
+        | s.cluster.UpdateRaftAttributes
+
+
+r.Advance()
-[documentation]: https://etcd.io/docs/latest
-[api]: https://etcd.io/docs/latest/learning/api
-[clustering]: https://etcd.io/docs/latest/op-guide/clustering
-[configuration]: https://etcd.io/docs/latest/op-guide/configuration
-[integrations]: https://etcd.io/docs/latest/integrations
-[security]: https://etcd.io/docs/latest/op-guide/security
-[tuning]: https://etcd.io/docs/latest/tuning
+```
-## Contact
+curl -H "X-Etcd-Cluster-ID:cdf818194e3a8c32" -H "X-PeerURLs:http://127.0.0.1:12345" -H "X-Min-Cluster-Version: 3.5.2"
+-H "X-Server-Version:3.5.2" http://localhost:2380/raft/stream/message/8e9e05c52164694d
+curl -X "POST" -H "X-Server-From:8e9e05c52164694d" -H "X-Etcd-Cluster-ID:cdf818194e3a8c32" -H "
+X-PeerURLs:http://127.0.0.1:12345" -H "X-Min-Cluster-Version: 3.5.2" -H "X-Server-Version:3.5.2"
+-d "" http://localhost:2380/raft/stream/snapshot
-- Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
-- Slack: [#etcd](https://kubernetes.slack.com/messages/C3HD8ARJ5/details/) channel on Kubernetes ([get an invite](http://slack.kubernetes.io/))
-- [Community meetings](#Community-meetings)
+humanize.Bytes net.SplitHostPort([2001:db8:1f70::999:de8:7648:6e8]:100)->[2001:db8:1f70::999:de8:7648:6e8]
-### Community meetings
+BoltDB already provides transaction isolation, atomicity, durability and consistency, with concurrency in the form of a single writer plus multiple readers.
-etcd contributors and maintainers have monthly (every four weeks) meetings at 11:00 AM (USA Pacific) on Thursday.
+Linearizable vs. Serializable: a linearizable read, roughly speaking, must return the latest committed data and never stale data.
-An initial agenda will be posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or other agendas.
+Comparison of v3 and v2. etcd v2 has the following problems:
+- Watch reliability: etcd v2 is an in-memory store that keeps no key history; it only keeps the last 1000 change events in a sliding window. Under heavy writes or network jitter, events are easily lost, clients then fall back to full re-syncs, producing many expensive requests and sometimes an etcd avalanche.
+- Performance bottlenecks: 1. early etcd v2 used an HTTP/1.x API with no compression, so heavy request volume can drive high CPU load, OOM and packet loss; 2. the v2 client polls Watch events over HTTP long connections, and since HTTP/1.x has no multiplexing, many watchers mean many connections, exhausting server sockets and memory; 3. with per-key TTLs, keys sharing the same TTL still each need their own refresh, which significantly increases cluster load and degrades performance when there are many keys.
+- Memory overhead: etcd v2 keeps a tree of all keys and values in memory. With many configuration entries or large numbers of Kubernetes Events this costs a lot of memory, and the periodic full-tree snapshots to disk consume substantial CPU and disk I/O, affecting stability.
+etcd v3 exists to solve these stability, scalability and performance problems:
+1. For memory overhead, Watch reliability and feature limits, it builds an MVCC database on a B-tree index plus boltdb, flattens the data model from a hierarchical directory tree to key-value, provides reliable event notification, adds transactions with multi-key atomic updates, and with boltdb persistence greatly lowers memory use and avoids v2's expensive periodic snapshots.
+2. etcd v3 uses a gRPC API with protobuf messages; encoding and decoding are more than 2x faster than JSON, and HTTP/2 multiplexing cuts the connection count in watcher-heavy scenarios.
+3. TTLs are optimized with Leases: each Lease has a TTL, keys with the same TTL share one Lease, and when the Lease expires all attached keys are deleted automatically, so keys no longer need individual renewals.
+4. etcd v3 supports range and paginated queries, avoiding large-response expensive requests.
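+A small clientv3 example of the Lease model described in point 3 (endpoint and key names are placeholders):
+```go
+package main
+
+import (
+	"context"
+	"time"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func main() {
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{"127.0.0.1:2379"},
+		DialTimeout: 5 * time.Second,
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer cli.Close()
+
+	ctx := context.Background()
+	lease, err := cli.Grant(ctx, 10) // one lease with a 10s TTL
+	if err != nil {
+		panic(err)
+	}
+	// both keys share the lease: one renewal covers them, and expiry deletes them together
+	cli.Put(ctx, "/svc/a", "1", clientv3.WithLease(lease.ID))
+	cli.Put(ctx, "/svc/b", "2", clientv3.WithLease(lease.ID))
+	cli.KeepAliveOnce(ctx, lease.ID) // renew once; KeepAlive would renew continuously
+}
+```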
-Meeting recordings are uploaded to official etcd [YouTube channel].
+pb.Message.Entries = [ pb.InternalRaftRequest ]
-Get calendar invitation by joining [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev) mailing group.
+Every new key in etcd is assigned a main revision plus a sub revision, 17 bytes long in the format 8byte_8byte; for example [00000002_00000000] becomes the bolt.db key
+00000000000000025f0000000000000000
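+A sketch of that encoding: an 8-byte big-endian main revision, a '_' (0x5f) separator, then the 8-byte sub revision:
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func revToBytes(main, sub uint64) []byte {
+	b := make([]byte, 17)
+	binary.BigEndian.PutUint64(b[0:8], main)
+	b[8] = '_'
+	binary.BigEndian.PutUint64(b[9:17], sub)
+	return b
+}
+
+func main() {
+	// main=2, sub=0  ->  00000000000000025f0000000000000000
+	fmt.Printf("%x\n", revToBytes(2, 0))
+}
+```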
-Join Hangouts Meet: [meet.google.com/umg-nrxn-qvs](https://meet.google.com/umg-nrxn-qvs)
+### Linearizable read flow
-Join by phone: +1 405-792-0633 PIN: 299 906#
+```
+localNode.run() loops forever
+    checks whether there is Ready data to hand out; r.readStates is one of the signals
+    n.readyc <- ready
+
+---------------------------------
+raftNode.start
+    case rd := <-r.Ready():    consumer side: take the Ready data, which carries r.ReadStates = r.readStates
+    select {
+    case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: // hand the read state to the server
+    case <-time.After(internalTimeout):
+        r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
+    case <-r.stopped:
+        return
+    }
+---------------------------------
+leader:
+    stepLeader;
+    case pb.MsgReadIndex:
+        1. the cluster has a single node:
+           r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: auto-increment ID})
+        2. otherwise:
+           uses pendingReadIndex / readIndexQueue; broadcasts a heartbeat carrying the ID and waits for acks from a majority of the cluster
+    case pb.MsgHeartbeatResp:
+        rss := r.readOnly.advance(m)
+
+rd.ReadStates
+----------------------------------
+linearizableReadLoop sends the MsgReadIndex message:
+    s.requestCurrentIndex
+        1. s.sendReadIndex(auto-increment ID)
+           s.r.ReadIndex sends a pb.MsgReadIndex message whose payload is the ID
+        2. case rs := <-s.r.readStateC:  wait for the response, ReadState{Index: r.raftLog.committed, RequestCtx: ID}
+           return r.raftLog.committed
+    once r.raftLog.committed >= s.getAppliedIndex()
+        nr.notify(nil)   which sends on nc.c
+--------------
+get
+linearizeReadNotify: a linearizable read triggers linearizableReadLoop and waits for the result
+    1. case s.readwaitc <- struct{}{}:  kick off the linearizable read
+    2. case <-nc.c:                     wait for the result
+       return nc.err
+then the read is served from the local node
+
+```
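+A conceptual Go sketch of this flow (not etcd's code): obtain the leader's committed index, wait until the
+local applied index catches up, then serve the read locally.
+```go
+// linearizableGet assumes readIndex asks the leader (which confirms its
+// leadership with a heartbeat quorum) for its committed index.
+func linearizableGet(kv map[string]string, key string,
+	readIndex func() uint64, appliedIndex func() uint64) string {
+	commit := readIndex()
+	for appliedIndex() < commit {
+		// etcd waits on a notifier channel here instead of busy-looping
+	}
+	return kv[key] // at least as fresh as anything committed before the read started
+}
+```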
-[shared-meeting-notes]: https://docs.google.com/document/d/16XEGyPBisZvmmoIHSZzv__LoyOeluC5a4x353CX0SIM/edit
-[YouTube channel]: https://www.youtube.com/channel/UC7tUWR24I5AR9NMsG-NYBlg
+### Lease checkpoint mechanism
-## Contributing
+```
+bug:
+    if the leader changes before a lease expires, the lease's TTL is reset
+how this is solved
+    every so often the leader syncs each lease's remaining TTL into the other nodes' db, so after a leader
+    change the error in the lease is at most that interval
+    - periodically sync the remaining TTL of each lease to the other nodes' db
+issue:
+    suppose a lease is granted with ttl 10s and the checkpoint carries remainingTTL 5s;
+    if the message takes 3s to reach the peer because of the network, the real remaining TTL on arrival should be 2s, yet the peer still records 5s
+- if delays are that long the node is clearly in trouble and will not become leader anyway
+
+
+author's reply:
+by design the leader keeps the lease expiry min-heap in memory, so keepalive (renew) requests must be sent directly to the leader; a follower that receives a keepalive forwards it to the leader. Renewals are not replicated through the raft protocol,
+whereas leaseGrant/Revoke requests are, and can therefore be handled by any node.
+```
-See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
+```
+curl -L http://127.0.0.1:2379/version
+curl -L http://192.168.59.156:2379/metrics
+etcdctl cluster-health
+etcdutl backup --data-dir /var/lib/etcd --backup-dir /tmp/etcd
+
+# without client certs
+etcdctl snapshot save a.db
+etcdctl snapshot restore a.db
+# with client certs
+etcdctl snapshot save --cert=./cert/server.crt --cacert=./cert/ca.crt --key=./cert/server.key a.db
+```
-## Reporting bugs
+### Leader transfer
-See [reporting bugs](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/reporting_bugs.md) for details about reporting any issues.
+```
+Step()
+  case pb.MsgVote, pb.MsgPreVote:
+    handle leader changes, voting, and so on
+
+  stepLeader()
+    case pb.MsgTransferLeader:
+      r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) // tell the chosen node to campaign immediately
+  stepFollower()
+    case pb.MsgTimeoutNow:
+      r.hup(campaignTransfer)
+        # sent to every node
+        r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
-## Reporting a security vulnerability
+```
-See [security disclosure and release process](security/README.md) for details on how to report a security vulnerability and how the etcd team manages it.
+Certificate parsing and authentication are very expensive (performance is extremely low).
-## Issue and PR management
+```
-See [issue triage guidelines](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_issues.md) for details on how issues are managed.
+# create an admin role
+etcdctl role add admin --user root:root
+# grant the admin role read-write permission on the key range [hello, helly)
+etcdctl role grant-permission admin readwrite hello helly --user root:root
+# attach the admin role to user alice, giving the user admin's permissions
+etcdctl user grant-role alice admin --user root:root
+```
-See [PR management](https://github.com/etcd-io/etcd/blob/main/Documentation/contributor-guide/triage_prs.md) for guidelines on how pull requests are managed.
+etcd stores the mapping from user keys to revisions in a B-tree. Why does etcd use a B-tree rather than a hash table or a balanced binary tree?
-## etcd Emeritus Maintainers
+```
+From the feature side: etcd supports range queries, so the index structure must support them too; a hash table cannot, a B-tree can.
+From the performance side: a balanced binary tree holds only one item per node, so the tree is tall, while a B-tree node holds many items,
+making the tree shallower and flatter, with fewer lookups and excellent insert/delete/update/query performance.
+```
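+A hedged example of the range-query point above using github.com/google/btree (the library behind etcd's
+in-memory key index); the v1 API is assumed:
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/btree"
+)
+
+type keyItem struct{ key string }
+
+func (a keyItem) Less(b btree.Item) bool { return a.key < b.(keyItem).key }
+
+func main() {
+	tr := btree.New(32) // each node holds many keys, so the tree stays shallow
+	for _, k := range []string{"hello1", "hello2", "helly", "world"} {
+		tr.ReplaceOrInsert(keyItem{k})
+	}
+	// range query [hello, helly): awkward with a hash table, natural with a B-tree
+	tr.AscendRange(keyItem{"hello"}, keyItem{"helly"}, func(i btree.Item) bool {
+		fmt.Println(i.(keyItem).key)
+		return true
+	})
+}
+```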
+
+Why do you think etcd uses lazy delete? Compared with synchronous delete, what are the pros and cons of each?
+
+```
+Lazy deletion is used because:
+1. it gives watchers on the key time to observe every state of the key and react to it;
+2. deleting from boltdb synchronously could trigger tree rebalancing and hurt the latency of other reads and writes.
+
+etcd keeps key history, so deleting immediately would break revision queries;
+lazy deletion also performs better, and the freed space can be reused later.
+```
+
+After suddenly deleting a large number of keys, does the db size immediately grow or shrink?
-These emeritus maintainers dedicated a part of their career to etcd and reviewed code, triaged bugs and pushed the project forward over a substantial period of time. Their contribution is greatly appreciated.
+```
+It should grow: etcd does not return the space to the OS immediately but keeps the free pages for later reuse, and tracking those free pages itself takes some memory.
+```
+
+```
-* Fanmin Shi
-* Anthony Romano
-* Brandon Philips
-* Joe Betz
-* Gyuho Lee
-* Jingyi Hu
-* Wenjia Zhang
-* Xiang Li
-* Ben Darnell
+$ etcdctl txn -i
+compares:
+mod("Alice") = "2"
+mod("Bob") = "3"
-### License
+success requests (get, put, del):   // the Then branch
+put Alice 100   // Alice starts with 200, minus 100
+put Bob 300     // Bob starts with 200, plus 100
-etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
+failure requests (get, put, del):   // the Else branch
+get Alice
+get Bob
+
+
+```
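+The same transfer expressed with clientv3's Txn API (a sketch; keys and revisions follow the etcdctl example above, the endpoint is a placeholder):
+```go
+package main
+
+import (
+	"context"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func transfer(cli *clientv3.Client) error {
+	_, err := cli.Txn(context.Background()).
+		If(
+			clientv3.Compare(clientv3.ModRevision("Alice"), "=", 2),
+			clientv3.Compare(clientv3.ModRevision("Bob"), "=", 3),
+		).
+		Then(
+			clientv3.OpPut("Alice", "100"), // Alice: 200 - 100
+			clientv3.OpPut("Bob", "300"),   // Bob:   200 + 100
+		).
+		Else(
+			clientv3.OpGet("Alice"),
+			clientv3.OpGet("Bob"),
+		).
+		Commit()
+	return err
+}
+
+func main() {
+	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
+	if err != nil {
+		panic(err)
+	}
+	defer cli.Close()
+	if err := transfer(cli); err != nil {
+		panic(err)
+	}
+}
+```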
+
+
+```
+If an etcd node runs short of memory, pages of the db file can be swapped out.
+When a read then hits a page that is not in memory, the page fault turns the read into disk I/O.
+This avoids OOM-killing the etcd process, but the read incurs significant latency.
+```
+
+
+./benchmark --conns=100 --clients=1000 range hello --consistency=l --total=500000
+./benchmark --conns=100 --clients=1000 range hello --consistency=s --total=500000
+
+# 256byte
+./benchmark --conns=100 --clients=1000 put --key-size=8 --sequential-keys --total=10000000 --val-size=256
+# 1m
+./benchmark --conns=100 --clients=1000 put --key-size=8 --sequential-keys --total=500 --val-size=1024000
+
+
+
+
+
+```
+{
+ "Key":"a",
+ "Modified":{
+ "Main":8,
+ "Sub":0
+ },
+ "Generations":[
+ {
+ "VersionCount":4,
+ "Created":{
+ "Main":2,
+ "Sub":0
+ },
+ "Revs":[
+ {
+ "Main":2,
+ "Sub":0
+ },
+ {
+ "Main":3,
+ "Sub":0
+ },
+ {
+ "Main":4,
+ "Sub":0
+ },
+ {
+ "Main":5,
+ "Sub":0
+ }
+ ]
+ },
+ {
+ "VersionCount":3,
+ "Created":{
+ "Main":6,
+ "Sub":0
+ },
+ "Revs":[
+ {
+ "Main":6,
+ "Sub":0
+ },
+ {
+ "Main":7,
+ "Sub":0
+ },
+ {
+ "Main":8,
+ "Sub":0
+ }
+ ]
+ }
+ ]
+}
+```
\ No newline at end of file
diff --git a/api/LICENSE b/api/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/api/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/api/authpb/auth.pb.go b/api/authpb/auth.pb.go
deleted file mode 100644
index 16affcd62cf..00000000000
--- a/api/authpb/auth.pb.go
+++ /dev/null
@@ -1,1158 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: auth.proto
-
-package authpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type Permission_Type int32
-
-const (
- READ Permission_Type = 0
- WRITE Permission_Type = 1
- READWRITE Permission_Type = 2
-)
-
-var Permission_Type_name = map[int32]string{
- 0: "READ",
- 1: "WRITE",
- 2: "READWRITE",
-}
-
-var Permission_Type_value = map[string]int32{
- "READ": 0,
- "WRITE": 1,
- "READWRITE": 2,
-}
-
-func (x Permission_Type) String() string {
- return proto.EnumName(Permission_Type_name, int32(x))
-}
-
-func (Permission_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{2, 0}
-}
-
-type UserAddOptions struct {
- NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UserAddOptions) Reset() { *m = UserAddOptions{} }
-func (m *UserAddOptions) String() string { return proto.CompactTextString(m) }
-func (*UserAddOptions) ProtoMessage() {}
-func (*UserAddOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{0}
-}
-func (m *UserAddOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UserAddOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_UserAddOptions.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *UserAddOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserAddOptions.Merge(m, src)
-}
-func (m *UserAddOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *UserAddOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_UserAddOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAddOptions proto.InternalMessageInfo
-
-// User is a single entry in the bucket authUsers
-type User struct {
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
- Options *UserAddOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *User) Reset() { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage() {}
-func (*User) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{1}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_User.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *User) XXX_Merge(src proto.Message) {
- xxx_messageInfo_User.Merge(m, src)
-}
-func (m *User) XXX_Size() int {
- return m.Size()
-}
-func (m *User) XXX_DiscardUnknown() {
- xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-// Permission is a single entity
-type Permission struct {
- PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
- Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Permission) Reset() { *m = Permission{} }
-func (m *Permission) String() string { return proto.CompactTextString(m) }
-func (*Permission) ProtoMessage() {}
-func (*Permission) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{2}
-}
-func (m *Permission) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Permission) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Permission.Merge(m, src)
-}
-func (m *Permission) XXX_Size() int {
- return m.Size()
-}
-func (m *Permission) XXX_DiscardUnknown() {
- xxx_messageInfo_Permission.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Permission proto.InternalMessageInfo
-
-// Role is a single entry in the bucket authRoles
-type Role struct {
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Role) Reset() { *m = Role{} }
-func (m *Role) String() string { return proto.CompactTextString(m) }
-func (*Role) ProtoMessage() {}
-func (*Role) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{3}
-}
-func (m *Role) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Role.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Role) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Role.Merge(m, src)
-}
-func (m *Role) XXX_Size() int {
- return m.Size()
-}
-func (m *Role) XXX_DiscardUnknown() {
- xxx_messageInfo_Role.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Role proto.InternalMessageInfo
-
-func init() {
- proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
- proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions")
- proto.RegisterType((*User)(nil), "authpb.User")
- proto.RegisterType((*Permission)(nil), "authpb.Permission")
- proto.RegisterType((*Role)(nil), "authpb.Role")
-}
-
-func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
-
-var fileDescriptor_8bbd6f3875b0e874 = []byte{
- // 338 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40,
- 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba,
- 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13,
- 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0,
- 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48,
- 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1,
- 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9,
- 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12,
- 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a,
- 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1,
- 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd,
- 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07,
- 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb,
- 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0,
- 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c,
- 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d,
- 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c,
- 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9,
- 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb,
- 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d,
- 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01,
- 0x00, 0x00,
-}
-
-func (m *UserAddOptions) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *UserAddOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.NoPassword {
- i--
- if m.NoPassword {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *User) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *User) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Options != nil {
- {
- size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintAuth(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if len(m.Roles) > 0 {
- for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Roles[iNdEx])
- copy(dAtA[i:], m.Roles[iNdEx])
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Permission) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x12
- }
- if m.PermType != 0 {
- i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Role) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Role) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.KeyPermission) > 0 {
- for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintAuth(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
- offset -= sovAuth(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *UserAddOptions) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.NoPassword {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *User) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- if len(m.Roles) > 0 {
- for _, s := range m.Roles {
- l = len(s)
- n += 1 + l + sovAuth(uint64(l))
- }
- }
- if m.Options != nil {
- l = m.Options.Size()
- n += 1 + l + sovAuth(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Permission) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.PermType != 0 {
- n += 1 + sovAuth(uint64(m.PermType))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Role) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- if len(m.KeyPermission) > 0 {
- for _, e := range m.KeyPermission {
- l = e.Size()
- n += 1 + l + sovAuth(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovAuth(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozAuth(x uint64) (n int) {
- return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *UserAddOptions) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.NoPassword = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipAuth(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *User) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: User: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
- if m.Password == nil {
- m.Password = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Options == nil {
- m.Options = &UserAddOptions{}
- }
- if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAuth(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Permission) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Permission: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
- }
- m.PermType = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PermType |= Permission_Type(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAuth(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Role) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Role: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.KeyPermission = append(m.KeyPermission, &Permission{})
- if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAuth(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipAuth(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthAuth
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupAuth
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthAuth
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/api/etcdserverpb/etcdserver.pb.go b/api/etcdserverpb/etcdserver.pb.go
deleted file mode 100644
index 38434d09c56..00000000000
--- a/api/etcdserverpb/etcdserver.pb.go
+++ /dev/null
@@ -1,1002 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: etcdserver.proto
-
-package etcdserverpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type Request struct {
- ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
- Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
- Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
- Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
- Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
- PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
- PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
- PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
- Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
- Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
- Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
- Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
- Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
- Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
- Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
- Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
- Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) {
- return fileDescriptor_09ffbeb3bebbce7e, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Request.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Request) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Request.Merge(m, src)
-}
-func (m *Request) XXX_Size() int {
- return m.Size()
-}
-func (m *Request) XXX_DiscardUnknown() {
- xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-type Metadata struct {
- NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
- ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metadata) Reset() { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage() {}
-func (*Metadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_09ffbeb3bebbce7e, []int{1}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Metadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metadata.Merge(m, src)
-}
-func (m *Metadata) XXX_Size() int {
- return m.Size()
-}
-func (m *Metadata) XXX_DiscardUnknown() {
- xxx_messageInfo_Metadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
- proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
-}
-
-func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
-
-var fileDescriptor_09ffbeb3bebbce7e = []byte{
- // 380 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
- 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
- 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
- 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
- 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
- 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
- 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
- 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
- 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
- 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
- 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
- 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
- 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
- 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
- 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
- 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
- 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
- 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
- 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
- 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
- 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
- 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
- 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
- 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
-}
-
-func (m *Request) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Request) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Refresh != nil {
- i--
- if *m.Refresh {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x88
- }
- i--
- if m.Stream {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x80
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
- i--
- dAtA[i] = 0x78
- i--
- if m.Quorum {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x70
- i--
- if m.Sorted {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x68
- i--
- if m.Recursive {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x60
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
- i--
- dAtA[i] = 0x58
- i--
- if m.Wait {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x50
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
- i--
- dAtA[i] = 0x48
- if m.PrevExist != nil {
- i--
- if *m.PrevExist {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
- i--
- dAtA[i] = 0x38
- i -= len(m.PrevValue)
- copy(dAtA[i:], m.PrevValue)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
- i--
- dAtA[i] = 0x32
- i--
- if m.Dir {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- i -= len(m.Val)
- copy(dAtA[i:], m.Val)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
- i--
- dAtA[i] = 0x22
- i -= len(m.Path)
- copy(dAtA[i:], m.Path)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Method)
- copy(dAtA[i:], m.Method)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
- i--
- dAtA[i] = 0x12
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *Metadata) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
- i--
- dAtA[i] = 0x10
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
- offset -= sovEtcdserver(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Request) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovEtcdserver(uint64(m.ID))
- l = len(m.Method)
- n += 1 + l + sovEtcdserver(uint64(l))
- l = len(m.Path)
- n += 1 + l + sovEtcdserver(uint64(l))
- l = len(m.Val)
- n += 1 + l + sovEtcdserver(uint64(l))
- n += 2
- l = len(m.PrevValue)
- n += 1 + l + sovEtcdserver(uint64(l))
- n += 1 + sovEtcdserver(uint64(m.PrevIndex))
- if m.PrevExist != nil {
- n += 2
- }
- n += 1 + sovEtcdserver(uint64(m.Expiration))
- n += 2
- n += 1 + sovEtcdserver(uint64(m.Since))
- n += 2
- n += 2
- n += 2
- n += 1 + sovEtcdserver(uint64(m.Time))
- n += 3
- if m.Refresh != nil {
- n += 3
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Metadata) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovEtcdserver(uint64(m.NodeID))
- n += 1 + sovEtcdserver(uint64(m.ClusterID))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovEtcdserver(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozEtcdserver(x uint64) (n int) {
- return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Request) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Request: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Method = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Path = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Val = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Dir = bool(v != 0)
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PrevValue = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
- }
- m.PrevIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PrevIndex |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.PrevExist = &b
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
- }
- m.Expiration = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Expiration |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Wait = bool(v != 0)
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
- }
- m.Since = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Since |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Recursive = bool(v != 0)
- case 13:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Sorted = bool(v != 0)
- case 14:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Quorum = bool(v != 0)
- case 15:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
- }
- m.Time = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Time |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 16:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Stream = bool(v != 0)
- case 17:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.Refresh = &b
- default:
- iNdEx = preIndex
- skippy, err := skipEtcdserver(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Metadata) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
- }
- m.NodeID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NodeID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
- }
- m.ClusterID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ClusterID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipEtcdserver(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipEtcdserver(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthEtcdserver
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupEtcdserver
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEtcdserver
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupEtcdserver = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/api/etcdserverpb/raft_internal.pb.go b/api/etcdserverpb/raft_internal.pb.go
deleted file mode 100644
index d59e65813f4..00000000000
--- a/api/etcdserverpb/raft_internal.pb.go
+++ /dev/null
@@ -1,2677 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft_internal.proto
-
-package etcdserverpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
- membershippb "go.etcd.io/etcd/api/v3/membershippb"
- _ "go.etcd.io/etcd/api/v3/versionpb"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type RequestHeader struct {
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // username is a username that is associated with an auth token of gRPC connection
- Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
- // auth_revision is a revision number of auth.authStore. It is not related to mvcc
- AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestHeader) Reset() { *m = RequestHeader{} }
-func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
-func (*RequestHeader) ProtoMessage() {}
-func (*RequestHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{0}
-}
-func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RequestHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestHeader.Merge(m, src)
-}
-func (m *RequestHeader) XXX_Size() int {
- return m.Size()
-}
-func (m *RequestHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
-
-// An InternalRaftRequest is the union of all requests which can be
-// sent via raft.
-type InternalRaftRequest struct {
- Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
- Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
- Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
- DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
- Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
- Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
- LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
- LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
- Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
- LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint,proto3" json:"lease_checkpoint,omitempty"`
- AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
- AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
- AuthStatus *AuthStatusRequest `protobuf:"bytes,1013,opt,name=auth_status,json=authStatus,proto3" json:"auth_status,omitempty"`
- Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
- AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
- AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
- AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
- AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
- AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
- AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
- AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
- AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
- AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
- AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
- AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
- AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
- AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
- ClusterVersionSet *membershippb.ClusterVersionSetRequest `protobuf:"bytes,1300,opt,name=cluster_version_set,json=clusterVersionSet,proto3" json:"cluster_version_set,omitempty"`
- ClusterMemberAttrSet *membershippb.ClusterMemberAttrSetRequest `protobuf:"bytes,1301,opt,name=cluster_member_attr_set,json=clusterMemberAttrSet,proto3" json:"cluster_member_attr_set,omitempty"`
- DowngradeInfoSet *membershippb.DowngradeInfoSetRequest `protobuf:"bytes,1302,opt,name=downgrade_info_set,json=downgradeInfoSet,proto3" json:"downgrade_info_set,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
-func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalRaftRequest) ProtoMessage() {}
-func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{1}
-}
-func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InternalRaftRequest.Merge(m, src)
-}
-func (m *InternalRaftRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *InternalRaftRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
-
-type EmptyResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
-func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
-func (*EmptyResponse) ProtoMessage() {}
-func (*EmptyResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{2}
-}
-func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *EmptyResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EmptyResponse.Merge(m, src)
-}
-func (m *EmptyResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *EmptyResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
-
-// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
-// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
-// For avoiding misusage the field, we have an internal version of AuthenticateRequest.
-type InternalAuthenticateRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- // simple_token is generated in API layer (etcdserver/v3_server.go)
- SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} }
-func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalAuthenticateRequest) ProtoMessage() {}
-func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{3}
-}
-func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
-}
-func (m *InternalAuthenticateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
- proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
- proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
- proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
-}
-
-func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
-
-var fileDescriptor_b4c9a9be0cfca103 = []byte{
- // 1054 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x56, 0x5d, 0x6f, 0x1b, 0x45,
- 0x14, 0xad, 0xd3, 0x34, 0x89, 0xc7, 0x49, 0x9a, 0x4e, 0x52, 0x3a, 0x38, 0x92, 0x71, 0x03, 0x2d,
- 0x01, 0x8a, 0x53, 0x1c, 0x78, 0xe1, 0x05, 0x5c, 0x3b, 0x4a, 0x83, 0x4a, 0x15, 0x6d, 0x0b, 0xaa,
- 0x84, 0xd0, 0x32, 0xde, 0xbd, 0xb1, 0xb7, 0x59, 0xef, 0x2e, 0x33, 0x63, 0x37, 0x7d, 0xe5, 0x91,
- 0x67, 0x40, 0xfc, 0x0c, 0x3e, 0xff, 0x43, 0x85, 0xf8, 0x28, 0xf0, 0x07, 0x20, 0xbc, 0xf0, 0x0e,
- 0xbc, 0xa3, 0xf9, 0xd8, 0x5d, 0xaf, 0x3d, 0xce, 0xdb, 0xfa, 0xde, 0x73, 0xcf, 0x39, 0x33, 0x73,
- 0xef, 0x78, 0xd0, 0x3a, 0xa3, 0x47, 0xc2, 0x0d, 0x22, 0x01, 0x2c, 0xa2, 0x61, 0x23, 0x61, 0xb1,
- 0x88, 0xf1, 0x32, 0x08, 0xcf, 0xe7, 0xc0, 0x46, 0xc0, 0x92, 0x6e, 0x75, 0xa3, 0x17, 0xf7, 0x62,
- 0x95, 0xd8, 0x91, 0x5f, 0x1a, 0x53, 0x5d, 0xcb, 0x31, 0x26, 0x52, 0x66, 0x89, 0x67, 0x3e, 0xeb,
- 0x32, 0xb9, 0x43, 0x93, 0x60, 0x67, 0x04, 0x8c, 0x07, 0x71, 0x94, 0x74, 0xd3, 0x2f, 0x83, 0xb8,
- 0x9e, 0x21, 0x06, 0x30, 0xe8, 0x02, 0xe3, 0xfd, 0x20, 0x49, 0xba, 0x63, 0x3f, 0x34, 0x6e, 0x8b,
- 0xa1, 0x15, 0x07, 0x3e, 0x1e, 0x02, 0x17, 0xb7, 0x81, 0xfa, 0xc0, 0xf0, 0x2a, 0x9a, 0x3b, 0xe8,
- 0x90, 0x52, 0xbd, 0xb4, 0x3d, 0xef, 0xcc, 0x1d, 0x74, 0x70, 0x15, 0x2d, 0x0d, 0xb9, 0x34, 0x3f,
- 0x00, 0x32, 0x57, 0x2f, 0x6d, 0x97, 0x9d, 0xec, 0x37, 0xbe, 0x81, 0x56, 0xe8, 0x50, 0xf4, 0x5d,
- 0x06, 0xa3, 0x40, 0x6a, 0x93, 0xf3, 0xb2, 0xec, 0xd6, 0xe2, 0xa7, 0xdf, 0x93, 0xf3, 0xbb, 0x8d,
- 0xd7, 0x9c, 0x65, 0x99, 0x75, 0x4c, 0xf2, 0xcd, 0xc5, 0x4f, 0x54, 0xf8, 0xe6, 0xd6, 0x0f, 0x18,
- 0xad, 0x1f, 0x98, 0x1d, 0x71, 0xe8, 0x91, 0x30, 0x06, 0xf0, 0x2e, 0x5a, 0xe8, 0x2b, 0x13, 0xc4,
- 0xaf, 0x97, 0xb6, 0x2b, 0xcd, 0xcd, 0xc6, 0xf8, 0x3e, 0x35, 0x0a, 0x3e, 0x1d, 0x03, 0x9d, 0xf2,
- 0x7b, 0x0d, 0xcd, 0x8d, 0x9a, 0xca, 0x69, 0xa5, 0x79, 0xd9, 0x4a, 0xe0, 0xcc, 0x8d, 0x9a, 0xf8,
- 0x26, 0xba, 0xc0, 0x68, 0xd4, 0x03, 0x65, 0xb9, 0xd2, 0xac, 0x4e, 0x20, 0x65, 0x2a, 0x85, 0x6b,
- 0x20, 0x7e, 0x19, 0x9d, 0x4f, 0x86, 0x82, 0xcc, 0x2b, 0x3c, 0x29, 0xe2, 0x0f, 0x87, 0xe9, 0x22,
- 0x1c, 0x09, 0xc2, 0x6d, 0xb4, 0xec, 0x43, 0x08, 0x02, 0x5c, 0x2d, 0x72, 0x41, 0x15, 0xd5, 0x8b,
- 0x45, 0x1d, 0x85, 0x28, 0x48, 0x55, 0xfc, 0x3c, 0x26, 0x05, 0xc5, 0x49, 0x44, 0x16, 0x6c, 0x82,
- 0xf7, 0x4f, 0xa2, 0x4c, 0x50, 0x9c, 0x44, 0xf8, 0x2d, 0x84, 0xbc, 0x78, 0x90, 0x50, 0x4f, 0xc8,
- 0x63, 0x58, 0x54, 0x25, 0xcf, 0x15, 0x4b, 0xda, 0x59, 0x3e, 0xad, 0x1c, 0x2b, 0xc1, 0x6f, 0xa3,
- 0x4a, 0x08, 0x94, 0x83, 0xdb, 0x63, 0x34, 0x12, 0x64, 0xc9, 0xc6, 0x70, 0x47, 0x02, 0xf6, 0x65,
- 0x3e, 0x63, 0x08, 0xb3, 0x90, 0x5c, 0xb3, 0x66, 0x60, 0x30, 0x8a, 0x8f, 0x81, 0x94, 0x6d, 0x6b,
- 0x56, 0x14, 0x8e, 0x02, 0x64, 0x6b, 0x0e, 0xf3, 0x98, 0x3c, 0x16, 0x1a, 0x52, 0x36, 0x20, 0xc8,
- 0x76, 0x2c, 0x2d, 0x99, 0xca, 0x8e, 0x45, 0x01, 0xf1, 0x03, 0xb4, 0xa6, 0x65, 0xbd, 0x3e, 0x78,
- 0xc7, 0x49, 0x1c, 0x44, 0x82, 0x54, 0x54, 0xf1, 0x0b, 0x16, 0xe9, 0x76, 0x06, 0x32, 0x34, 0x69,
- 0xb3, 0xbe, 0xee, 0x5c, 0x0c, 0x8b, 0x00, 0xdc, 0x42, 0x15, 0xd5, 0xdd, 0x10, 0xd1, 0x6e, 0x08,
- 0xe4, 0x6f, 0xeb, 0xae, 0xb6, 0x86, 0xa2, 0xbf, 0xa7, 0x00, 0xd9, 0x9e, 0xd0, 0x2c, 0x84, 0x3b,
- 0x48, 0x8d, 0x80, 0xeb, 0x07, 0x5c, 0x71, 0xfc, 0xb3, 0x68, 0xdb, 0x14, 0xc9, 0xd1, 0xd1, 0x88,
- 0x6c, 0x53, 0x68, 0x1e, 0xc3, 0xef, 0x18, 0x23, 0x5c, 0x50, 0x31, 0xe4, 0xe4, 0xbf, 0x99, 0x46,
- 0xee, 0x29, 0xc0, 0xc4, 0xca, 0xde, 0xd0, 0x8e, 0x74, 0x0e, 0xdf, 0xd5, 0x8e, 0x20, 0x12, 0x81,
- 0x47, 0x05, 0x90, 0x7f, 0x35, 0xd9, 0x4b, 0x45, 0xb2, 0x74, 0x3a, 0x5b, 0x63, 0xd0, 0xd4, 0x5a,
- 0xa1, 0x1e, 0xef, 0x99, 0x2b, 0x40, 0xde, 0x09, 0x2e, 0xf5, 0x7d, 0xf2, 0xe3, 0xd2, 0xac, 0x25,
- 0xbe, 0xc7, 0x81, 0xb5, 0x7c, 0xbf, 0xb0, 0x44, 0x13, 0xc3, 0x77, 0xd1, 0x5a, 0x4e, 0xa3, 0x87,
- 0x80, 0xfc, 0xa4, 0x99, 0x9e, 0xb7, 0x33, 0x99, 0xe9, 0x31, 0x64, 0xab, 0xb4, 0x10, 0x2e, 0xda,
- 0xea, 0x81, 0x20, 0x3f, 0x9f, 0x69, 0x6b, 0x1f, 0xc4, 0x94, 0xad, 0x7d, 0x10, 0xb8, 0x87, 0x9e,
- 0xcd, 0x69, 0xbc, 0xbe, 0x1c, 0x4b, 0x37, 0xa1, 0x9c, 0x3f, 0x8a, 0x99, 0x4f, 0x7e, 0xd1, 0x94,
- 0xaf, 0xd8, 0x29, 0xdb, 0x0a, 0x7d, 0x68, 0xc0, 0x29, 0xfb, 0x33, 0xd4, 0x9a, 0xc6, 0x0f, 0xd0,
- 0xc6, 0x98, 0x5f, 0x39, 0x4f, 0x2e, 0x8b, 0x43, 0x20, 0x4f, 0xb5, 0xc6, 0xf5, 0x19, 0xb6, 0xd5,
- 0x2c, 0xc6, 0x79, 0xdb, 0x5c, 0xa2, 0x93, 0x19, 0xfc, 0x01, 0xba, 0x9c, 0x33, 0xeb, 0xd1, 0xd4,
- 0xd4, 0xbf, 0x6a, 0xea, 0x17, 0xed, 0xd4, 0x66, 0x46, 0xc7, 0xb8, 0x31, 0x9d, 0x4a, 0xe1, 0xdb,
- 0x68, 0x35, 0x27, 0x0f, 0x03, 0x2e, 0xc8, 0x6f, 0x9a, 0xf5, 0xaa, 0x9d, 0xf5, 0x4e, 0xc0, 0x45,
- 0xa1, 0x8f, 0xd2, 0x60, 0xc6, 0x24, 0xad, 0x69, 0xa6, 0xdf, 0x67, 0x32, 0x49, 0xe9, 0x29, 0xa6,
- 0x34, 0x98, 0x1d, 0xbd, 0x62, 0x92, 0x1d, 0xf9, 0x55, 0x79, 0xd6, 0xd1, 0xcb, 0x9a, 0xc9, 0x8e,
- 0x34, 0xb1, 0xac, 0x23, 0x15, 0x8d, 0xe9, 0xc8, 0xaf, 0xcb, 0xb3, 0x3a, 0x52, 0x56, 0x59, 0x3a,
- 0x32, 0x0f, 0x17, 0x6d, 0xc9, 0x8e, 0xfc, 0xe6, 0x4c, 0x5b, 0x93, 0x1d, 0x69, 0x62, 0xf8, 0x21,
- 0xaa, 0x8e, 0xd1, 0xa8, 0x46, 0x49, 0x80, 0x0d, 0x02, 0xae, 0xfe, 0x7f, 0xbf, 0xd5, 0x9c, 0x37,
- 0x66, 0x70, 0x4a, 0xf8, 0x61, 0x86, 0x4e, 0xf9, 0xaf, 0x50, 0x7b, 0x1e, 0x0f, 0xd0, 0x66, 0xae,
- 0x65, 0x5a, 0x67, 0x4c, 0xec, 0x3b, 0x2d, 0xf6, 0xaa, 0x5d, 0x4c, 0x77, 0xc9, 0xb4, 0x1a, 0xa1,
- 0x33, 0x00, 0xf8, 0x23, 0xb4, 0xee, 0x85, 0x43, 0x2e, 0x80, 0xb9, 0xe6, 0x2d, 0xe3, 0x72, 0x10,
- 0xe4, 0x33, 0x64, 0x46, 0x60, 0xfc, 0x21, 0xd3, 0x68, 0x6b, 0xe4, 0xfb, 0x1a, 0x78, 0x0f, 0xc4,
- 0xd4, 0xad, 0x77, 0xc9, 0x9b, 0x84, 0xe0, 0x87, 0xe8, 0x4a, 0xaa, 0xa0, 0xc9, 0x5c, 0x2a, 0x04,
- 0x53, 0x2a, 0x9f, 0x23, 0x73, 0x0f, 0xda, 0x54, 0xde, 0x55, 0xb1, 0x96, 0x10, 0xcc, 0x26, 0xb4,
- 0xe1, 0x59, 0x50, 0xf8, 0x43, 0x84, 0xfd, 0xf8, 0x51, 0xd4, 0x63, 0xd4, 0x07, 0x37, 0x88, 0x8e,
- 0x62, 0x25, 0xf3, 0x85, 0x96, 0xb9, 0x56, 0x94, 0xe9, 0xa4, 0xc0, 0x83, 0xe8, 0x28, 0xb6, 0x49,
- 0xac, 0xf9, 0x13, 0x88, 0xfc, 0x31, 0x75, 0x11, 0xad, 0xec, 0x0d, 0x12, 0xf1, 0xd8, 0x01, 0x9e,
- 0xc4, 0x11, 0x87, 0xad, 0xc7, 0x68, 0xf3, 0x8c, 0xeb, 0x1b, 0x63, 0x34, 0xaf, 0xde, 0x72, 0x25,
- 0xf5, 0x96, 0x53, 0xdf, 0xf2, 0x8d, 0x97, 0xdd, 0x6a, 0xe6, 0x8d, 0x97, 0xfe, 0xc6, 0x57, 0xd1,
- 0x32, 0x0f, 0x06, 0x49, 0x08, 0xae, 0x88, 0x8f, 0x41, 0x3f, 0xf1, 0xca, 0x4e, 0x45, 0xc7, 0xee,
- 0xcb, 0x50, 0xe6, 0xe5, 0xd6, 0xc6, 0x93, 0x3f, 0x6b, 0xe7, 0x9e, 0x9c, 0xd6, 0x4a, 0x4f, 0x4f,
- 0x6b, 0xa5, 0x3f, 0x4e, 0x6b, 0xa5, 0x2f, 0xff, 0xaa, 0x9d, 0xeb, 0x2e, 0xa8, 0x97, 0xe6, 0xee,
- 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x30, 0x36, 0x53, 0xc6, 0x0b, 0x0b, 0x00, 0x00,
-}
-
-func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.AuthRevision != 0 {
- i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Username) > 0 {
- i -= len(m.Username)
- copy(dAtA[i:], m.Username)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
- i--
- dAtA[i] = 0x12
- }
- if m.ID != 0 {
- i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.DowngradeInfoSet != nil {
- {
- size, err := m.DowngradeInfoSet.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x51
- i--
- dAtA[i] = 0xb2
- }
- if m.ClusterMemberAttrSet != nil {
- {
- size, err := m.ClusterMemberAttrSet.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x51
- i--
- dAtA[i] = 0xaa
- }
- if m.ClusterVersionSet != nil {
- {
- size, err := m.ClusterVersionSet.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x51
- i--
- dAtA[i] = 0xa2
- }
- if m.AuthRoleRevokePermission != nil {
- {
- size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0xa2
- }
- if m.AuthRoleGrantPermission != nil {
- {
- size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x9a
- }
- if m.AuthRoleGet != nil {
- {
- size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x92
- }
- if m.AuthRoleDelete != nil {
- {
- size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x8a
- }
- if m.AuthRoleAdd != nil {
- {
- size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x82
- }
- if m.AuthRoleList != nil {
- {
- size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x9a
- }
- if m.AuthUserList != nil {
- {
- size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x92
- }
- if m.AuthUserRevokeRole != nil {
- {
- size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x8a
- }
- if m.AuthUserGrantRole != nil {
- {
- size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x82
- }
- if m.AuthUserChangePassword != nil {
- {
- size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xfa
- }
- if m.AuthUserGet != nil {
- {
- size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xf2
- }
- if m.AuthUserDelete != nil {
- {
- size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xea
- }
- if m.AuthUserAdd != nil {
- {
- size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xe2
- }
- if m.AuthStatus != nil {
- {
- size, err := m.AuthStatus.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3f
- i--
- dAtA[i] = 0xaa
- }
- if m.Authenticate != nil {
- {
- size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3f
- i--
- dAtA[i] = 0xa2
- }
- if m.AuthDisable != nil {
- {
- size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3f
- i--
- dAtA[i] = 0x9a
- }
- if m.AuthEnable != nil {
- {
- size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x6
- i--
- dAtA[i] = 0xa2
- }
- if m.LeaseCheckpoint != nil {
- {
- size, err := m.LeaseCheckpoint.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- if m.Alarm != nil {
- {
- size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- }
- if m.LeaseRevoke != nil {
- {
- size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- if m.LeaseGrant != nil {
- {
- size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- if m.Compaction != nil {
- {
- size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- if m.Txn != nil {
- {
- size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if m.DeleteRange != nil {
- {
- size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.Put != nil {
- {
- size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if m.Range != nil {
- {
- size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.V2 != nil {
- {
- size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.ID != 0 {
- i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.SimpleToken) > 0 {
- i -= len(m.SimpleToken)
- copy(dAtA[i:], m.SimpleToken)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
- offset -= sovRaftInternal(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *RequestHeader) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRaftInternal(uint64(m.ID))
- }
- l = len(m.Username)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRevision != 0 {
- n += 1 + sovRaftInternal(uint64(m.AuthRevision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *InternalRaftRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRaftInternal(uint64(m.ID))
- }
- if m.V2 != nil {
- l = m.V2.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Range != nil {
- l = m.Range.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Put != nil {
- l = m.Put.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.DeleteRange != nil {
- l = m.DeleteRange.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Txn != nil {
- l = m.Txn.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Compaction != nil {
- l = m.Compaction.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.LeaseGrant != nil {
- l = m.LeaseGrant.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.LeaseRevoke != nil {
- l = m.LeaseRevoke.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Alarm != nil {
- l = m.Alarm.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.LeaseCheckpoint != nil {
- l = m.LeaseCheckpoint.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Header != nil {
- l = m.Header.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthEnable != nil {
- l = m.AuthEnable.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthDisable != nil {
- l = m.AuthDisable.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.Authenticate != nil {
- l = m.Authenticate.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthStatus != nil {
- l = m.AuthStatus.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserAdd != nil {
- l = m.AuthUserAdd.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserDelete != nil {
- l = m.AuthUserDelete.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserGet != nil {
- l = m.AuthUserGet.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserChangePassword != nil {
- l = m.AuthUserChangePassword.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserGrantRole != nil {
- l = m.AuthUserGrantRole.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserRevokeRole != nil {
- l = m.AuthUserRevokeRole.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserList != nil {
- l = m.AuthUserList.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleList != nil {
- l = m.AuthRoleList.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleAdd != nil {
- l = m.AuthRoleAdd.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleDelete != nil {
- l = m.AuthRoleDelete.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleGet != nil {
- l = m.AuthRoleGet.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleGrantPermission != nil {
- l = m.AuthRoleGrantPermission.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleRevokePermission != nil {
- l = m.AuthRoleRevokePermission.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.ClusterVersionSet != nil {
- l = m.ClusterVersionSet.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.ClusterMemberAttrSet != nil {
- l = m.ClusterMemberAttrSet.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.DowngradeInfoSet != nil {
- l = m.DowngradeInfoSet.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *EmptyResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *InternalAuthenticateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- l = len(m.SimpleToken)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovRaftInternal(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaftInternal(x uint64) (n int) {
- return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *RequestHeader) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Username = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
- }
- m.AuthRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AuthRevision |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.V2 == nil {
- m.V2 = &Request{}
- }
- if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Range == nil {
- m.Range = &RangeRequest{}
- }
- if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Put == nil {
- m.Put = &PutRequest{}
- }
- if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DeleteRange == nil {
- m.DeleteRange = &DeleteRangeRequest{}
- }
- if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Txn == nil {
- m.Txn = &TxnRequest{}
- }
- if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Compaction == nil {
- m.Compaction = &CompactionRequest{}
- }
- if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.LeaseGrant == nil {
- m.LeaseGrant = &LeaseGrantRequest{}
- }
- if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.LeaseRevoke == nil {
- m.LeaseRevoke = &LeaseRevokeRequest{}
- }
- if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Alarm == nil {
- m.Alarm = &AlarmRequest{}
- }
- if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.LeaseCheckpoint == nil {
- m.LeaseCheckpoint = &LeaseCheckpointRequest{}
- }
- if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 100:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &RequestHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthEnable == nil {
- m.AuthEnable = &AuthEnableRequest{}
- }
- if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1011:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthDisable == nil {
- m.AuthDisable = &AuthDisableRequest{}
- }
- if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1012:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Authenticate == nil {
- m.Authenticate = &InternalAuthenticateRequest{}
- }
- if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1013:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthStatus", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthStatus == nil {
- m.AuthStatus = &AuthStatusRequest{}
- }
- if err := m.AuthStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1100:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserAdd == nil {
- m.AuthUserAdd = &AuthUserAddRequest{}
- }
- if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1101:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserDelete == nil {
- m.AuthUserDelete = &AuthUserDeleteRequest{}
- }
- if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1102:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserGet == nil {
- m.AuthUserGet = &AuthUserGetRequest{}
- }
- if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1103:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserChangePassword == nil {
- m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
- }
- if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1104:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserGrantRole == nil {
- m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
- }
- if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1105:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserRevokeRole == nil {
- m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
- }
- if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1106:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserList == nil {
- m.AuthUserList = &AuthUserListRequest{}
- }
- if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1107:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleList == nil {
- m.AuthRoleList = &AuthRoleListRequest{}
- }
- if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1200:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleAdd == nil {
- m.AuthRoleAdd = &AuthRoleAddRequest{}
- }
- if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1201:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleDelete == nil {
- m.AuthRoleDelete = &AuthRoleDeleteRequest{}
- }
- if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1202:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleGet == nil {
- m.AuthRoleGet = &AuthRoleGetRequest{}
- }
- if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1203:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleGrantPermission == nil {
- m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
- }
- if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1204:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleRevokePermission == nil {
- m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
- }
- if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1300:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersionSet", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.ClusterVersionSet == nil {
- m.ClusterVersionSet = &membershippb.ClusterVersionSetRequest{}
- }
- if err := m.ClusterVersionSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1301:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterMemberAttrSet", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.ClusterMemberAttrSet == nil {
- m.ClusterMemberAttrSet = &membershippb.ClusterMemberAttrSetRequest{}
- }
- if err := m.ClusterMemberAttrSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1302:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DowngradeInfoSet", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DowngradeInfoSet == nil {
- m.DowngradeInfoSet = &membershippb.DowngradeInfoSetRequest{}
- }
- if err := m.DowngradeInfoSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SimpleToken = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRaftInternal(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRaftInternal
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupRaftInternal
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRaftInternal
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupRaftInternal = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/api/etcdserverpb/raft_internal_stringer_test.go b/api/etcdserverpb/raft_internal_stringer_test.go
deleted file mode 100644
index f6280e91351..00000000000
--- a/api/etcdserverpb/raft_internal_stringer_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserverpb_test
-
-import (
- "testing"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-)
-
-// TestInvalidGoTypeIntPanic tests conditions that caused
-// panic: invalid Go type int for field k8s_io.kubernetes.vendor.go_etcd_io.etcd.etcdserver.etcdserverpb.loggablePutRequest.value_size
-// See https://github.com/kubernetes/kubernetes/issues/91937 for more details
-func TestInvalidGoTypeIntPanic(t *testing.T) {
- result := pb.NewLoggablePutRequest(&pb.PutRequest{}).String()
- if result != "" {
- t.Errorf("Got result: %s, expected empty string", result)
- }
-}
diff --git a/api/etcdserverpb/rpc.pb.go b/api/etcdserverpb/rpc.pb.go
deleted file mode 100644
index 0b68fe5a3e1..00000000000
--- a/api/etcdserverpb/rpc.pb.go
+++ /dev/null
@@ -1,26026 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: rpc.proto
-
-package etcdserverpb
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
- authpb "go.etcd.io/etcd/api/v3/authpb"
- mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
- _ "go.etcd.io/etcd/api/v3/versionpb"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type AlarmType int32
-
-const (
- AlarmType_NONE AlarmType = 0
- AlarmType_NOSPACE AlarmType = 1
- AlarmType_CORRUPT AlarmType = 2
-)
-
-var AlarmType_name = map[int32]string{
- 0: "NONE",
- 1: "NOSPACE",
- 2: "CORRUPT",
-}
-
-var AlarmType_value = map[string]int32{
- "NONE": 0,
- "NOSPACE": 1,
- "CORRUPT": 2,
-}
-
-func (x AlarmType) String() string {
- return proto.EnumName(AlarmType_name, int32(x))
-}
-
-func (AlarmType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
-
-type RangeRequest_SortOrder int32
-
-const (
- RangeRequest_NONE RangeRequest_SortOrder = 0
- RangeRequest_ASCEND RangeRequest_SortOrder = 1
- RangeRequest_DESCEND RangeRequest_SortOrder = 2
-)
-
-var RangeRequest_SortOrder_name = map[int32]string{
- 0: "NONE",
- 1: "ASCEND",
- 2: "DESCEND",
-}
-
-var RangeRequest_SortOrder_value = map[string]int32{
- "NONE": 0,
- "ASCEND": 1,
- "DESCEND": 2,
-}
-
-func (x RangeRequest_SortOrder) String() string {
- return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
-}
-
-func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
-}
-
-type RangeRequest_SortTarget int32
-
-const (
- RangeRequest_KEY RangeRequest_SortTarget = 0
- RangeRequest_VERSION RangeRequest_SortTarget = 1
- RangeRequest_CREATE RangeRequest_SortTarget = 2
- RangeRequest_MOD RangeRequest_SortTarget = 3
- RangeRequest_VALUE RangeRequest_SortTarget = 4
-)
-
-var RangeRequest_SortTarget_name = map[int32]string{
- 0: "KEY",
- 1: "VERSION",
- 2: "CREATE",
- 3: "MOD",
- 4: "VALUE",
-}
-
-var RangeRequest_SortTarget_value = map[string]int32{
- "KEY": 0,
- "VERSION": 1,
- "CREATE": 2,
- "MOD": 3,
- "VALUE": 4,
-}
-
-func (x RangeRequest_SortTarget) String() string {
- return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
-}
-
-func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
-}
-
-type Compare_CompareResult int32
-
-const (
- Compare_EQUAL Compare_CompareResult = 0
- Compare_GREATER Compare_CompareResult = 1
- Compare_LESS Compare_CompareResult = 2
- Compare_NOT_EQUAL Compare_CompareResult = 3
-)
-
-var Compare_CompareResult_name = map[int32]string{
- 0: "EQUAL",
- 1: "GREATER",
- 2: "LESS",
- 3: "NOT_EQUAL",
-}
-
-var Compare_CompareResult_value = map[string]int32{
- "EQUAL": 0,
- "GREATER": 1,
- "LESS": 2,
- "NOT_EQUAL": 3,
-}
-
-func (x Compare_CompareResult) String() string {
- return proto.EnumName(Compare_CompareResult_name, int32(x))
-}
-
-func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
-}
-
-type Compare_CompareTarget int32
-
-const (
- Compare_VERSION Compare_CompareTarget = 0
- Compare_CREATE Compare_CompareTarget = 1
- Compare_MOD Compare_CompareTarget = 2
- Compare_VALUE Compare_CompareTarget = 3
- Compare_LEASE Compare_CompareTarget = 4
-)
-
-var Compare_CompareTarget_name = map[int32]string{
- 0: "VERSION",
- 1: "CREATE",
- 2: "MOD",
- 3: "VALUE",
- 4: "LEASE",
-}
-
-var Compare_CompareTarget_value = map[string]int32{
- "VERSION": 0,
- "CREATE": 1,
- "MOD": 2,
- "VALUE": 3,
- "LEASE": 4,
-}
-
-func (x Compare_CompareTarget) String() string {
- return proto.EnumName(Compare_CompareTarget_name, int32(x))
-}
-
-func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
-}
-
-type WatchCreateRequest_FilterType int32
-
-const (
- // filter out put event.
- WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
- // filter out delete event.
- WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
-)
-
-var WatchCreateRequest_FilterType_name = map[int32]string{
- 0: "NOPUT",
- 1: "NODELETE",
-}
-
-var WatchCreateRequest_FilterType_value = map[string]int32{
- "NOPUT": 0,
- "NODELETE": 1,
-}
-
-func (x WatchCreateRequest_FilterType) String() string {
- return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
-}
-
-func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
-}
-
-type AlarmRequest_AlarmAction int32
-
-const (
- AlarmRequest_GET AlarmRequest_AlarmAction = 0
- AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1
- AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
-)
-
-var AlarmRequest_AlarmAction_name = map[int32]string{
- 0: "GET",
- 1: "ACTIVATE",
- 2: "DEACTIVATE",
-}
-
-var AlarmRequest_AlarmAction_value = map[string]int32{
- "GET": 0,
- "ACTIVATE": 1,
- "DEACTIVATE": 2,
-}
-
-func (x AlarmRequest_AlarmAction) String() string {
- return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
-}
-
-func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{54, 0}
-}
-
-type DowngradeRequest_DowngradeAction int32
-
-const (
- DowngradeRequest_VALIDATE DowngradeRequest_DowngradeAction = 0
- DowngradeRequest_ENABLE DowngradeRequest_DowngradeAction = 1
- DowngradeRequest_CANCEL DowngradeRequest_DowngradeAction = 2
-)
-
-var DowngradeRequest_DowngradeAction_name = map[int32]string{
- 0: "VALIDATE",
- 1: "ENABLE",
- 2: "CANCEL",
-}
-
-var DowngradeRequest_DowngradeAction_value = map[string]int32{
- "VALIDATE": 0,
- "ENABLE": 1,
- "CANCEL": 2,
-}
-
-func (x DowngradeRequest_DowngradeAction) String() string {
- return proto.EnumName(DowngradeRequest_DowngradeAction_name, int32(x))
-}
-
-func (DowngradeRequest_DowngradeAction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{57, 0}
-}
-
-type ResponseHeader struct {
- // cluster_id is the ID of the cluster which sent the response.
- ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
- // member_id is the ID of the member which sent the response.
- MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
- // revision is the key-value store revision when the request was applied, and it's
- // unset (so 0) in case of calls not interacting with key-value store.
- // For watch progress responses, the header.revision indicates progress. All future events
- // received in this stream are guaranteed to have a higher revision number than the
- // header.revision number.
- Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
- // raft_term is the raft term when the request was applied.
- RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
-func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
-func (*ResponseHeader) ProtoMessage() {}
-func (*ResponseHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
-func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResponseHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseHeader.Merge(m, src)
-}
-func (m *ResponseHeader) XXX_Size() int {
- return m.Size()
-}
-func (m *ResponseHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
-
-func (m *ResponseHeader) GetClusterId() uint64 {
- if m != nil {
- return m.ClusterId
- }
- return 0
-}
-
-func (m *ResponseHeader) GetMemberId() uint64 {
- if m != nil {
- return m.MemberId
- }
- return 0
-}
-
-func (m *ResponseHeader) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-func (m *ResponseHeader) GetRaftTerm() uint64 {
- if m != nil {
- return m.RaftTerm
- }
- return 0
-}
-
-type RangeRequest struct {
- // key is the first key for the range. If range_end is not given, the request only looks up key.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // range_end is the upper bound on the requested range [key, range_end).
- // If range_end is '\0', the range is all keys >= key.
- // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
- // then the range request gets all keys prefixed with key.
- // If both key and range_end are '\0', then the range request returns all keys.
- RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- // limit is a limit on the number of keys returned for the request. When limit is set to 0,
- // it is treated as no limit.
- Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
- // revision is the point-in-time of the key-value store to use for the range.
- // If revision is less or equal to zero, the range is over the newest key-value store.
- // If the revision has been compacted, ErrCompacted is returned as a response.
- Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
- // sort_order is the order for returned sorted results.
- SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
- // sort_target is the key-value field to use for sorting.
- SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
- // serializable sets the range request to use serializable member-local reads.
- // Range requests are linearizable by default; linearizable requests have higher
- // latency and lower throughput than serializable requests but reflect the current
- // consensus of the cluster. For better performance, in exchange for possible stale reads,
- // a serializable range request is served locally without needing to reach consensus
- // with other nodes in the cluster.
- Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
- // keys_only when set returns only the keys and not the values.
- KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
- // count_only when set returns only the count of the keys in the range.
- CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
- // min_mod_revision is the lower bound for returned key mod revisions; all keys with
- // lesser mod revisions will be filtered away.
- MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
- // max_mod_revision is the upper bound for returned key mod revisions; all keys with
- // greater mod revisions will be filtered away.
- MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
- // min_create_revision is the lower bound for returned key create revisions; all keys with
- // lesser create revisions will be filtered away.
- MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
- // max_create_revision is the upper bound for returned key create revisions; all keys with
- // greater create revisions will be filtered away.
- MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RangeRequest) Reset() { *m = RangeRequest{} }
-func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
-func (*RangeRequest) ProtoMessage() {}
-func (*RangeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{1}
-}
-func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RangeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RangeRequest.Merge(m, src)
-}
-func (m *RangeRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *RangeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_RangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
-
-func (m *RangeRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *RangeRequest) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-func (m *RangeRequest) GetLimit() int64 {
- if m != nil {
- return m.Limit
- }
- return 0
-}
-
-func (m *RangeRequest) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
- if m != nil {
- return m.SortOrder
- }
- return RangeRequest_NONE
-}
-
-func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
- if m != nil {
- return m.SortTarget
- }
- return RangeRequest_KEY
-}
-
-func (m *RangeRequest) GetSerializable() bool {
- if m != nil {
- return m.Serializable
- }
- return false
-}
-
-func (m *RangeRequest) GetKeysOnly() bool {
- if m != nil {
- return m.KeysOnly
- }
- return false
-}
-
-func (m *RangeRequest) GetCountOnly() bool {
- if m != nil {
- return m.CountOnly
- }
- return false
-}
-
-func (m *RangeRequest) GetMinModRevision() int64 {
- if m != nil {
- return m.MinModRevision
- }
- return 0
-}
-
-func (m *RangeRequest) GetMaxModRevision() int64 {
- if m != nil {
- return m.MaxModRevision
- }
- return 0
-}
-
-func (m *RangeRequest) GetMinCreateRevision() int64 {
- if m != nil {
- return m.MinCreateRevision
- }
- return 0
-}
-
-func (m *RangeRequest) GetMaxCreateRevision() int64 {
- if m != nil {
- return m.MaxCreateRevision
- }
- return 0
-}
-
-type RangeResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // kvs is the list of key-value pairs matched by the range request.
- // kvs is empty when count is requested.
- Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
- // more indicates if there are more keys to return in the requested range.
- More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
- // count is set to the number of keys within the range when requested.
- Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RangeResponse) Reset() { *m = RangeResponse{} }
-func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
-func (*RangeResponse) ProtoMessage() {}
-func (*RangeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{2}
-}
-func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RangeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RangeResponse.Merge(m, src)
-}
-func (m *RangeResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *RangeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_RangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
-
-func (m *RangeResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
- if m != nil {
- return m.Kvs
- }
- return nil
-}
-
-func (m *RangeResponse) GetMore() bool {
- if m != nil {
- return m.More
- }
- return false
-}
-
-func (m *RangeResponse) GetCount() int64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-type PutRequest struct {
- // key is the key, in bytes, to put into the key-value store.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // value is the value, in bytes, to associate with the key in the key-value store.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- // lease is the lease ID to associate with the key in the key-value store. A lease
- // value of 0 indicates no lease.
- Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
- // If prev_kv is set, etcd gets the previous key-value pair before changing it.
- // The previous key-value pair will be returned in the put response.
- PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- // If ignore_value is set, etcd updates the key using its current value.
- // Returns an error if the key does not exist.
- IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
- // If ignore_lease is set, etcd updates the key using its current lease.
- // Returns an error if the key does not exist.
- IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutRequest) Reset() { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage() {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{3}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *PutRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutRequest.Merge(m, src)
-}
-func (m *PutRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-func (m *PutRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *PutRequest) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *PutRequest) GetLease() int64 {
- if m != nil {
- return m.Lease
- }
- return 0
-}
-
-func (m *PutRequest) GetPrevKv() bool {
- if m != nil {
- return m.PrevKv
- }
- return false
-}
-
-func (m *PutRequest) GetIgnoreValue() bool {
- if m != nil {
- return m.IgnoreValue
- }
- return false
-}
-
-func (m *PutRequest) GetIgnoreLease() bool {
- if m != nil {
- return m.IgnoreLease
- }
- return false
-}
-
-type PutResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // if prev_kv is set in the request, the previous key-value pair will be returned.
- PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutResponse) Reset() { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage() {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{4}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *PutResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutResponse.Merge(m, src)
-}
-func (m *PutResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
- if m != nil {
- return m.PrevKv
- }
- return nil
-}
-
-type DeleteRangeRequest struct {
- // key is the first key to delete in the range.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // range_end is the key following the last key to delete for the range [key, range_end).
- // If range_end is not given, the range is defined to contain only the key argument.
- // If range_end is the given key plus one (i.e. the key with its final byte incremented),
- // then the range is all the keys with the prefix (the given key).
- // If range_end is '\0', the range is all keys greater than or equal to the key argument.
- RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
- // The previous key-value pairs will be returned in the delete response.
- PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
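-
-// Editorial sketch, not part of the generated code: deleting every key under a
-// prefix with the range_end convention described above. prefixRangeEnd is a
-// hypothetical helper (clientv3 ships an equivalent); it is defined here only
-// for illustration.
-func prefixRangeEnd(prefix []byte) []byte {
-	end := append([]byte{}, prefix...)
-	for i := len(end) - 1; i >= 0; i-- {
-		if end[i] < 0xff {
-			end[i]++
-			return end[:i+1]
-		}
-	}
-	// The prefix is all 0xff bytes: '\0' means "every key >= prefix".
-	return []byte{0}
-}
-
-func exampleDeletePrefix() *DeleteRangeRequest {
-	return &DeleteRangeRequest{
-		Key:      []byte("foo/"),
-		RangeEnd: prefixRangeEnd([]byte("foo/")), // "foo0"
-		PrevKv:   true, // return the deleted key-value pairs
-	}
-}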
-
-func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} }
-func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeRequest) ProtoMessage() {}
-func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{5}
-}
-func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
-}
-func (m *DeleteRangeRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
-
-func (m *DeleteRangeRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *DeleteRangeRequest) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-func (m *DeleteRangeRequest) GetPrevKv() bool {
- if m != nil {
- return m.PrevKv
- }
- return false
-}
-
-type DeleteRangeResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // deleted is the number of keys deleted by the delete range request.
- Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
- // if prev_kv is set in the request, the previous key-value pairs will be returned.
- PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} }
-func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeResponse) ProtoMessage() {}
-func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{6}
-}
-func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
-}
-func (m *DeleteRangeResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
-
-func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *DeleteRangeResponse) GetDeleted() int64 {
- if m != nil {
- return m.Deleted
- }
- return 0
-}
-
-func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
- if m != nil {
- return m.PrevKvs
- }
- return nil
-}
-
-type RequestOp struct {
- // request is a union of request types accepted by a transaction.
- //
- // Types that are valid to be assigned to Request:
- // *RequestOp_RequestRange
- // *RequestOp_RequestPut
- // *RequestOp_RequestDeleteRange
- // *RequestOp_RequestTxn
- Request isRequestOp_Request `protobuf_oneof:"request"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestOp) Reset() { *m = RequestOp{} }
-func (m *RequestOp) String() string { return proto.CompactTextString(m) }
-func (*RequestOp) ProtoMessage() {}
-func (*RequestOp) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{7}
-}
-func (m *RequestOp) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RequestOp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestOp.Merge(m, src)
-}
-func (m *RequestOp) XXX_Size() int {
- return m.Size()
-}
-func (m *RequestOp) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestOp proto.InternalMessageInfo
-
-type isRequestOp_Request interface {
- isRequestOp_Request()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type RequestOp_RequestRange struct {
- RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof" json:"request_range,omitempty"`
-}
-type RequestOp_RequestPut struct {
- RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof" json:"request_put,omitempty"`
-}
-type RequestOp_RequestDeleteRange struct {
- RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof" json:"request_delete_range,omitempty"`
-}
-type RequestOp_RequestTxn struct {
- RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof" json:"request_txn,omitempty"`
-}
-
-func (*RequestOp_RequestRange) isRequestOp_Request() {}
-func (*RequestOp_RequestPut) isRequestOp_Request() {}
-func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
-func (*RequestOp_RequestTxn) isRequestOp_Request() {}
-
-func (m *RequestOp) GetRequest() isRequestOp_Request {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestRange() *RangeRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
- return x.RequestRange
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestPut() *PutRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
- return x.RequestPut
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
- return x.RequestDeleteRange
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestTxn() *TxnRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
- return x.RequestTxn
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*RequestOp) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*RequestOp_RequestRange)(nil),
- (*RequestOp_RequestPut)(nil),
- (*RequestOp_RequestDeleteRange)(nil),
- (*RequestOp_RequestTxn)(nil),
- }
-}
-
-type ResponseOp struct {
- // response is a union of response types returned by a transaction.
- //
- // Types that are valid to be assigned to Response:
- // *ResponseOp_ResponseRange
- // *ResponseOp_ResponsePut
- // *ResponseOp_ResponseDeleteRange
- // *ResponseOp_ResponseTxn
- Response isResponseOp_Response `protobuf_oneof:"response"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResponseOp) Reset() { *m = ResponseOp{} }
-func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
-func (*ResponseOp) ProtoMessage() {}
-func (*ResponseOp) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{8}
-}
-func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResponseOp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseOp.Merge(m, src)
-}
-func (m *ResponseOp) XXX_Size() int {
- return m.Size()
-}
-func (m *ResponseOp) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
-
-type isResponseOp_Response interface {
- isResponseOp_Response()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type ResponseOp_ResponseRange struct {
- ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof" json:"response_range,omitempty"`
-}
-type ResponseOp_ResponsePut struct {
- ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof" json:"response_put,omitempty"`
-}
-type ResponseOp_ResponseDeleteRange struct {
- ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof" json:"response_delete_range,omitempty"`
-}
-type ResponseOp_ResponseTxn struct {
- ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof" json:"response_txn,omitempty"`
-}
-
-func (*ResponseOp_ResponseRange) isResponseOp_Response() {}
-func (*ResponseOp_ResponsePut) isResponseOp_Response() {}
-func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
-func (*ResponseOp_ResponseTxn) isResponseOp_Response() {}
-
-func (m *ResponseOp) GetResponse() isResponseOp_Response {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponseRange() *RangeResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
- return x.ResponseRange
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponsePut() *PutResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
- return x.ResponsePut
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
- return x.ResponseDeleteRange
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponseTxn() *TxnResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
- return x.ResponseTxn
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ResponseOp) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ResponseOp_ResponseRange)(nil),
- (*ResponseOp_ResponsePut)(nil),
- (*ResponseOp_ResponseDeleteRange)(nil),
- (*ResponseOp_ResponseTxn)(nil),
- }
-}
-
-type Compare struct {
- // result is the logical comparison operation for this comparison.
- Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
- // target is the key-value field to inspect for the comparison.
- Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
- // key is the subject key for the comparison operation.
- Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
- // Types that are valid to be assigned to TargetUnion:
- // *Compare_Version
- // *Compare_CreateRevision
- // *Compare_ModRevision
- // *Compare_Value
- // *Compare_Lease
- TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
- // range_end compares the given target to all keys in the range [key, range_end).
- // See RangeRequest for more details on key ranges.
- RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Compare) Reset() { *m = Compare{} }
-func (m *Compare) String() string { return proto.CompactTextString(m) }
-func (*Compare) ProtoMessage() {}
-func (*Compare) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{9}
-}
-func (m *Compare) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Compare) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Compare.Merge(m, src)
-}
-func (m *Compare) XXX_Size() int {
- return m.Size()
-}
-func (m *Compare) XXX_DiscardUnknown() {
- xxx_messageInfo_Compare.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Compare proto.InternalMessageInfo
-
-type isCompare_TargetUnion interface {
- isCompare_TargetUnion()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Compare_Version struct {
- Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof" json:"version,omitempty"`
-}
-type Compare_CreateRevision struct {
- CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof" json:"create_revision,omitempty"`
-}
-type Compare_ModRevision struct {
- ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof" json:"mod_revision,omitempty"`
-}
-type Compare_Value struct {
- Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof" json:"value,omitempty"`
-}
-type Compare_Lease struct {
- Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof" json:"lease,omitempty"`
-}
-
-func (*Compare_Version) isCompare_TargetUnion() {}
-func (*Compare_CreateRevision) isCompare_TargetUnion() {}
-func (*Compare_ModRevision) isCompare_TargetUnion() {}
-func (*Compare_Value) isCompare_TargetUnion() {}
-func (*Compare_Lease) isCompare_TargetUnion() {}
-
-func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
- if m != nil {
- return m.TargetUnion
- }
- return nil
-}
-
-func (m *Compare) GetResult() Compare_CompareResult {
- if m != nil {
- return m.Result
- }
- return Compare_EQUAL
-}
-
-func (m *Compare) GetTarget() Compare_CompareTarget {
- if m != nil {
- return m.Target
- }
- return Compare_VERSION
-}
-
-func (m *Compare) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *Compare) GetVersion() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
- return x.Version
- }
- return 0
-}
-
-func (m *Compare) GetCreateRevision() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
- return x.CreateRevision
- }
- return 0
-}
-
-func (m *Compare) GetModRevision() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
- return x.ModRevision
- }
- return 0
-}
-
-func (m *Compare) GetValue() []byte {
- if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
- return x.Value
- }
- return nil
-}
-
-func (m *Compare) GetLease() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
- return x.Lease
- }
- return 0
-}
-
-func (m *Compare) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Compare) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Compare_Version)(nil),
- (*Compare_CreateRevision)(nil),
- (*Compare_ModRevision)(nil),
- (*Compare_Value)(nil),
- (*Compare_Lease)(nil),
- }
-}
-
-// From google paxosdb paper:
-// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
-// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
-// and consists of three components:
-// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
-// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
-// may apply to the same or different entries in the database. All tests in the guard are applied and
-// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
-// it executes f op (see item 3 below).
-// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
-// lookup operation, and applies to a single database entry. Two different operations in the list may apply
-// to the same or different entries in the database. These operations are executed if guard evaluates to true.
-// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
-type TxnRequest struct {
- // compare is a list of predicates representing a conjunction of terms.
- // If the comparisons succeed, then the success requests will be processed in order,
- // and the response will contain their respective responses in order.
- // If the comparisons fail, then the failure requests will be processed in order,
- // and the response will contain their respective responses in order.
- Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
- // success is a list of requests which will be applied when compare evaluates to true.
- Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
- // failure is a list of requests which will be applied when compare evaluates to false.
- Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
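-
-// Editorial sketch, not part of the generated code: a TxnRequest that follows the
-// MultiOp pattern described above — a guard (compare), a t op branch (success) and
-// an f op branch (failure). Types and field names are the ones defined in this
-// file; the construction itself is a hypothetical example.
-func exampleTxn() *TxnRequest {
-	return &TxnRequest{
-		// Guard: proceed only if the current value of "foo" equals "bar".
-		Compare: []*Compare{{
-			Result:      Compare_EQUAL,
-			Target:      Compare_VALUE,
-			Key:         []byte("foo"),
-			TargetUnion: &Compare_Value{Value: []byte("bar")},
-		}},
-		// Applied atomically when every compare evaluates to true.
-		Success: []*RequestOp{{Request: &RequestOp_RequestPut{
-			RequestPut: &PutRequest{Key: []byte("foo"), Value: []byte("baz")},
-		}}},
-		// Applied instead when any compare evaluates to false.
-		Failure: []*RequestOp{{Request: &RequestOp_RequestRange{
-			RequestRange: &RangeRequest{Key: []byte("foo")},
-		}}},
-	}
-}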
-
-func (m *TxnRequest) Reset() { *m = TxnRequest{} }
-func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
-func (*TxnRequest) ProtoMessage() {}
-func (*TxnRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{10}
-}
-func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *TxnRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TxnRequest.Merge(m, src)
-}
-func (m *TxnRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *TxnRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TxnRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
-
-func (m *TxnRequest) GetCompare() []*Compare {
- if m != nil {
- return m.Compare
- }
- return nil
-}
-
-func (m *TxnRequest) GetSuccess() []*RequestOp {
- if m != nil {
- return m.Success
- }
- return nil
-}
-
-func (m *TxnRequest) GetFailure() []*RequestOp {
- if m != nil {
- return m.Failure
- }
- return nil
-}
-
-type TxnResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // succeeded is set to true if the compare evaluated to true, and false otherwise.
- Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
- // responses is a list of responses corresponding to the results from applying
- // success if succeeded is true or failure if succeeded is false.
- Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TxnResponse) Reset() { *m = TxnResponse{} }
-func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
-func (*TxnResponse) ProtoMessage() {}
-func (*TxnResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{11}
-}
-func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *TxnResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TxnResponse.Merge(m, src)
-}
-func (m *TxnResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *TxnResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_TxnResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
-
-func (m *TxnResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *TxnResponse) GetSucceeded() bool {
- if m != nil {
- return m.Succeeded
- }
- return false
-}
-
-func (m *TxnResponse) GetResponses() []*ResponseOp {
- if m != nil {
- return m.Responses
- }
- return nil
-}
-
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-type CompactionRequest struct {
- // revision is the key-value store revision for the compaction operation.
- Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
- // physical is set so the RPC will wait until the compaction is physically
- // applied to the local database such that compacted entries are totally
- // removed from the backend database.
- Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
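-
-// Editorial sketch, not part of the generated code: a compaction request that
-// discards superseded revisions below 1000 and, because physical is set, only
-// returns once the compacted entries are removed from the backend database.
-func exampleCompaction() *CompactionRequest {
-	return &CompactionRequest{Revision: 1000, Physical: true}
-}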
-
-func (m *CompactionRequest) Reset() { *m = CompactionRequest{} }
-func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
-func (*CompactionRequest) ProtoMessage() {}
-func (*CompactionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{12}
-}
-func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CompactionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompactionRequest.Merge(m, src)
-}
-func (m *CompactionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *CompactionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
-
-func (m *CompactionRequest) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-func (m *CompactionRequest) GetPhysical() bool {
- if m != nil {
- return m.Physical
- }
- return false
-}
-
-type CompactionResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompactionResponse) Reset() { *m = CompactionResponse{} }
-func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
-func (*CompactionResponse) ProtoMessage() {}
-func (*CompactionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{13}
-}
-func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CompactionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompactionResponse.Merge(m, src)
-}
-func (m *CompactionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *CompactionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
-
-func (m *CompactionResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type HashRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashRequest) Reset() { *m = HashRequest{} }
-func (m *HashRequest) String() string { return proto.CompactTextString(m) }
-func (*HashRequest) ProtoMessage() {}
-func (*HashRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{14}
-}
-func (m *HashRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashRequest.Merge(m, src)
-}
-func (m *HashRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *HashRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_HashRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashRequest proto.InternalMessageInfo
-
-type HashKVRequest struct {
- // revision is the key-value store revision for the hash operation.
- Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashKVRequest) Reset() { *m = HashKVRequest{} }
-func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
-func (*HashKVRequest) ProtoMessage() {}
-func (*HashKVRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{15}
-}
-func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashKVRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashKVRequest.Merge(m, src)
-}
-func (m *HashKVRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *HashKVRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
-
-func (m *HashKVRequest) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-type HashKVResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
- // compact_revision is the compacted revision of the key-value store when the hash begins.
- CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
- // hash_revision is the revision up to which the hash is calculated.
- HashRevision int64 `protobuf:"varint,4,opt,name=hash_revision,json=hashRevision,proto3" json:"hash_revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashKVResponse) Reset() { *m = HashKVResponse{} }
-func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
-func (*HashKVResponse) ProtoMessage() {}
-func (*HashKVResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{16}
-}
-func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashKVResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashKVResponse.Merge(m, src)
-}
-func (m *HashKVResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *HashKVResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
-
-func (m *HashKVResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *HashKVResponse) GetHash() uint32 {
- if m != nil {
- return m.Hash
- }
- return 0
-}
-
-func (m *HashKVResponse) GetCompactRevision() int64 {
- if m != nil {
- return m.CompactRevision
- }
- return 0
-}
-
-func (m *HashKVResponse) GetHashRevision() int64 {
- if m != nil {
- return m.HashRevision
- }
- return 0
-}
-
-type HashResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // hash is the hash value computed from the responding member's KV's backend.
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashResponse) Reset() { *m = HashResponse{} }
-func (m *HashResponse) String() string { return proto.CompactTextString(m) }
-func (*HashResponse) ProtoMessage() {}
-func (*HashResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{17}
-}
-func (m *HashResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashResponse.Merge(m, src)
-}
-func (m *HashResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *HashResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_HashResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashResponse proto.InternalMessageInfo
-
-func (m *HashResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *HashResponse) GetHash() uint32 {
- if m != nil {
- return m.Hash
- }
- return 0
-}
-
-type SnapshotRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} }
-func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
-func (*SnapshotRequest) ProtoMessage() {}
-func (*SnapshotRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{18}
-}
-func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SnapshotRequest.Merge(m, src)
-}
-func (m *SnapshotRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *SnapshotRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
-
-type SnapshotResponse struct {
- // header has the current key-value store information. The first header in the snapshot
- // stream indicates the point in time of the snapshot.
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // remaining_bytes is the number of blob bytes to be sent after this message
- RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
- // blob contains the next chunk of the snapshot in the snapshot stream.
- Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
- // version is the local version of the server that created the snapshot.
- // In a cluster whose members run different binary versions, each member can return a different result.
- // It informs which etcd server version should be used when restoring the snapshot.
- Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
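-
-// Editorial sketch, not part of the generated code: draining a snapshot stream
-// chunk by chunk as described above. The recv callback stands in for the Recv
-// method of the generated streaming client, and the stream is assumed to end
-// with io.EOF as gRPC streams do; both are assumptions, not part of this file.
-func exampleReadSnapshot(recv func() (*SnapshotResponse, error)) ([]byte, error) {
-	var blob []byte
-	for {
-		resp, err := recv()
-		if err == io.EOF {
-			return blob, nil // stream closed: the snapshot is complete
-		}
-		if err != nil {
-			return nil, err
-		}
-		// remaining_bytes reports how much of the blob is still to come.
-		blob = append(blob, resp.GetBlob()...)
-	}
-}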
-
-func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} }
-func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
-func (*SnapshotResponse) ProtoMessage() {}
-func (*SnapshotResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{19}
-}
-func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SnapshotResponse.Merge(m, src)
-}
-func (m *SnapshotResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *SnapshotResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
-
-func (m *SnapshotResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *SnapshotResponse) GetRemainingBytes() uint64 {
- if m != nil {
- return m.RemainingBytes
- }
- return 0
-}
-
-func (m *SnapshotResponse) GetBlob() []byte {
- if m != nil {
- return m.Blob
- }
- return nil
-}
-
-func (m *SnapshotResponse) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-type WatchRequest struct {
- // request_union is a request to create a new watcher, cancel an existing watcher, or request a progress notification.
- //
- // Types that are valid to be assigned to RequestUnion:
- // *WatchRequest_CreateRequest
- // *WatchRequest_CancelRequest
- // *WatchRequest_ProgressRequest
- RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchRequest) Reset() { *m = WatchRequest{} }
-func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchRequest) ProtoMessage() {}
-func (*WatchRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{20}
-}
-func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchRequest.Merge(m, src)
-}
-func (m *WatchRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
-
-type isWatchRequest_RequestUnion interface {
- isWatchRequest_RequestUnion()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type WatchRequest_CreateRequest struct {
- CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof" json:"create_request,omitempty"`
-}
-type WatchRequest_CancelRequest struct {
- CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof" json:"cancel_request,omitempty"`
-}
-type WatchRequest_ProgressRequest struct {
- ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof" json:"progress_request,omitempty"`
-}
-
-func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {}
-func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {}
-func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
-
-func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
- if m != nil {
- return m.RequestUnion
- }
- return nil
-}
-
-func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
- if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
- return x.CreateRequest
- }
- return nil
-}
-
-func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
- if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
- return x.CancelRequest
- }
- return nil
-}
-
-func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
- if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
- return x.ProgressRequest
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*WatchRequest) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*WatchRequest_CreateRequest)(nil),
- (*WatchRequest_CancelRequest)(nil),
- (*WatchRequest_ProgressRequest)(nil),
- }
-}
-
-type WatchCreateRequest struct {
- // key is the key to register for watching.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
- // only the key argument is watched. If range_end is equal to '\0', all keys greater than
- // or equal to the key argument are watched.
- // If range_end is the given key plus one (i.e. the key with its final byte incremented),
- // then all keys with the prefix (the given key) will be watched.
- RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- // start_revision is an optional revision to watch from (inclusive). If no start_revision is given, the watch starts from "now".
- StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
- // progress_notify is set so that the etcd server will periodically send a WatchResponse with
- // no events to the new watcher if there are no recent events. It is useful when clients
- // wish to recover a disconnected watcher starting from a recent known revision.
- // The etcd server may decide how often it will send notifications based on current load.
- ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
- // filters filter the events at server side before it sends back to the watcher.
- Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
- // If prev_kv is set, created watcher gets the previous KV before the event happens.
- // If the previous KV is already compacted, nothing will be returned.
- PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- // If watch_id is provided and non-zero, it will be assigned to this watcher.
- // Since creating a watcher in etcd is not a synchronous operation,
- // this can be used to ensure that ordering is correct when creating multiple
- // watchers on the same stream. Creating a watcher with an ID already in
- // use on the stream will cause an error to be returned.
- WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
- // fragment enables splitting large revisions into multiple watch responses.
- Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
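-
-// Editorial sketch, not part of the generated code: a create request watching
-// every key under "foo/" from revision 100, with progress notifications and the
-// previous key-value pair attached to each event. prefixRangeEnd is the
-// hypothetical helper sketched next to DeleteRangeRequest above.
-func exampleWatchCreate() *WatchRequest {
-	return &WatchRequest{RequestUnion: &WatchRequest_CreateRequest{
-		CreateRequest: &WatchCreateRequest{
-			Key:            []byte("foo/"),
-			RangeEnd:       prefixRangeEnd([]byte("foo/")),
-			StartRevision:  100,
-			ProgressNotify: true,
-			PrevKv:         true,
-		},
-	}}
-}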
-
-func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} }
-func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCreateRequest) ProtoMessage() {}
-func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{21}
-}
-func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchCreateRequest.Merge(m, src)
-}
-func (m *WatchCreateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchCreateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
-
-func (m *WatchCreateRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *WatchCreateRequest) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-func (m *WatchCreateRequest) GetStartRevision() int64 {
- if m != nil {
- return m.StartRevision
- }
- return 0
-}
-
-func (m *WatchCreateRequest) GetProgressNotify() bool {
- if m != nil {
- return m.ProgressNotify
- }
- return false
-}
-
-func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
- if m != nil {
- return m.Filters
- }
- return nil
-}
-
-func (m *WatchCreateRequest) GetPrevKv() bool {
- if m != nil {
- return m.PrevKv
- }
- return false
-}
-
-func (m *WatchCreateRequest) GetWatchId() int64 {
- if m != nil {
- return m.WatchId
- }
- return 0
-}
-
-func (m *WatchCreateRequest) GetFragment() bool {
- if m != nil {
- return m.Fragment
- }
- return false
-}
-
-type WatchCancelRequest struct {
- // watch_id is the watcher id to cancel so that no more events are transmitted.
- WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} }
-func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCancelRequest) ProtoMessage() {}
-func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{22}
-}
-func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchCancelRequest.Merge(m, src)
-}
-func (m *WatchCancelRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchCancelRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
-
-func (m *WatchCancelRequest) GetWatchId() int64 {
- if m != nil {
- return m.WatchId
- }
- return 0
-}
-
-// Requests that a watch stream progress status be sent in the watch response stream as soon
-// as possible.
-type WatchProgressRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} }
-func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchProgressRequest) ProtoMessage() {}
-func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{23}
-}
-func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchProgressRequest.Merge(m, src)
-}
-func (m *WatchProgressRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchProgressRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
-
-type WatchResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // watch_id is the ID of the watcher that corresponds to the response.
- WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
- // created is set to true if the response is for a create watch request.
- // The client should record the watch_id and expect to receive events for
- // the created watcher from the same stream.
- // All events sent to the created watcher will carry the same watch_id.
- Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
- // canceled is set to true if the response is for a cancel watch request.
- // No further events will be sent to the canceled watcher.
- Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
- // compact_revision is set to the minimum index if a watcher tries to watch
- // at a compacted index.
- //
- // This happens when creating a watcher at a compacted revision or the watcher cannot
- // catch up with the progress of the key-value store.
- //
- // The client should treat the watcher as canceled and should not try to create any
- // watcher with the same start_revision again.
- CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
- // cancel_reason indicates the reason for canceling the watcher.
- CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
- // fragment is true if a large watch response was split over multiple responses.
- Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
- Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
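-
-// Editorial sketch, not part of the generated code: the branches a client is
-// expected to take for each WatchResponse, following the field comments above.
-// The mvccpb.Event fields used here (Type, Kv) come from the imported mvccpb
-// package; the handler itself is hypothetical.
-func exampleHandleWatchResponse(resp *WatchResponse) error {
-	switch {
-	case resp.GetCreated():
-		// Remember resp.WatchId: every event for this watcher carries it.
-	case resp.GetCanceled():
-		// No further events will arrive; do not recreate the watcher at the
-		// same start_revision if compact_revision is set.
-		return fmt.Errorf("watch %d canceled: %s (compacted at revision %d)",
-			resp.GetWatchId(), resp.GetCancelReason(), resp.GetCompactRevision())
-	}
-	for _, ev := range resp.GetEvents() {
-		_ = ev.Type // mvccpb.PUT or mvccpb.DELETE
-		_ = ev.Kv   // the key-value pair after the event
-	}
-	return nil
-}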
-
-func (m *WatchResponse) Reset() { *m = WatchResponse{} }
-func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
-func (*WatchResponse) ProtoMessage() {}
-func (*WatchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{24}
-}
-func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchResponse.Merge(m, src)
-}
-func (m *WatchResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
-
-func (m *WatchResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *WatchResponse) GetWatchId() int64 {
- if m != nil {
- return m.WatchId
- }
- return 0
-}
-
-func (m *WatchResponse) GetCreated() bool {
- if m != nil {
- return m.Created
- }
- return false
-}
-
-func (m *WatchResponse) GetCanceled() bool {
- if m != nil {
- return m.Canceled
- }
- return false
-}
-
-func (m *WatchResponse) GetCompactRevision() int64 {
- if m != nil {
- return m.CompactRevision
- }
- return 0
-}
-
-func (m *WatchResponse) GetCancelReason() string {
- if m != nil {
- return m.CancelReason
- }
- return ""
-}
-
-func (m *WatchResponse) GetFragment() bool {
- if m != nil {
- return m.Fragment
- }
- return false
-}
-
-func (m *WatchResponse) GetEvents() []*mvccpb.Event {
- if m != nil {
- return m.Events
- }
- return nil
-}
-
-type LeaseGrantRequest struct {
- // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
- TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
- // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} }
-func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantRequest) ProtoMessage() {}
-func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{25}
-}
-func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
-}
-func (m *LeaseGrantRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
-
-func (m *LeaseGrantRequest) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *LeaseGrantRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type LeaseGrantResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // ID is the lease ID for the granted lease.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- // TTL is the server chosen lease time-to-live in seconds.
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
- Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} }
-func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantResponse) ProtoMessage() {}
-func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{26}
-}
-func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
-}
-func (m *LeaseGrantResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
-
-func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseGrantResponse) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseGrantResponse) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *LeaseGrantResponse) GetError() string {
- if m != nil {
- return m.Error
- }
- return ""
-}
-
-type LeaseRevokeRequest struct {
- // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} }
-func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeRequest) ProtoMessage() {}
-func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{27}
-}
-func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
-}
-func (m *LeaseRevokeRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
-
-func (m *LeaseRevokeRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type LeaseRevokeResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} }
-func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeResponse) ProtoMessage() {}
-func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{28}
-}
-func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
-}
-func (m *LeaseRevokeResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
-
-func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type LeaseCheckpoint struct {
- // ID is the lease ID to checkpoint.
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // Remaining_TTL is the remaining time until expiry of the lease.
- Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} }
-func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) }
-func (*LeaseCheckpoint) ProtoMessage() {}
-func (*LeaseCheckpoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{29}
-}
-func (m *LeaseCheckpoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCheckpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseCheckpoint.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseCheckpoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCheckpoint.Merge(m, src)
-}
-func (m *LeaseCheckpoint) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCheckpoint) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCheckpoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCheckpoint proto.InternalMessageInfo
-
-func (m *LeaseCheckpoint) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseCheckpoint) GetRemaining_TTL() int64 {
- if m != nil {
- return m.Remaining_TTL
- }
- return 0
-}
-
-type LeaseCheckpointRequest struct {
- Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints,proto3" json:"checkpoints,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} }
-func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseCheckpointRequest) ProtoMessage() {}
-func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{30}
-}
-func (m *LeaseCheckpointRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseCheckpointRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseCheckpointRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCheckpointRequest.Merge(m, src)
-}
-func (m *LeaseCheckpointRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCheckpointRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCheckpointRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCheckpointRequest proto.InternalMessageInfo
-
-func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint {
- if m != nil {
- return m.Checkpoints
- }
- return nil
-}
-
-type LeaseCheckpointResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} }
-func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseCheckpointResponse) ProtoMessage() {}
-func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{31}
-}
-func (m *LeaseCheckpointResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseCheckpointResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseCheckpointResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCheckpointResponse.Merge(m, src)
-}
-func (m *LeaseCheckpointResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCheckpointResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCheckpointResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCheckpointResponse proto.InternalMessageInfo
-
-func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type LeaseKeepAliveRequest struct {
- // ID is the lease ID for the lease to keep alive.
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} }
-func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveRequest) ProtoMessage() {}
-func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{32}
-}
-func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
-}
-func (m *LeaseKeepAliveRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type LeaseKeepAliveResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // ID is the lease ID from the keep alive request.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- // TTL is the new time-to-live for the lease.
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} }
-func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveResponse) ProtoMessage() {}
-func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{33}
-}
-func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
-}
-func (m *LeaseKeepAliveResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseKeepAliveResponse) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseKeepAliveResponse) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-type LeaseTimeToLiveRequest struct {
- // ID is the lease ID for the lease.
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // keys is true to query all the keys attached to this lease.
- Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} }
-func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveRequest) ProtoMessage() {}
-func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{34}
-}
-func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveRequest) GetKeys() bool {
- if m != nil {
- return m.Keys
- }
- return false
-}
-
-type LeaseTimeToLiveResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // ID is the lease ID from the keep alive request.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
- // Keys is the list of keys attached to this lease.
- Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} }
-func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveResponse) ProtoMessage() {}
-func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{35}
-}
-func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseTimeToLiveResponse) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
- if m != nil {
- return m.GrantedTTL
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
- if m != nil {
- return m.Keys
- }
- return nil
-}
-
-type LeaseLeasesRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} }
-func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesRequest) ProtoMessage() {}
-func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{36}
-}
-func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
-}
-func (m *LeaseLeasesRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
-
-type LeaseStatus struct {
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseStatus) Reset() { *m = LeaseStatus{} }
-func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
-func (*LeaseStatus) ProtoMessage() {}
-func (*LeaseStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{37}
-}
-func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseStatus.Merge(m, src)
-}
-func (m *LeaseStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
-
-func (m *LeaseStatus) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type LeaseLeasesResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} }
-func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesResponse) ProtoMessage() {}
-func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{38}
-}
-func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
-}
-func (m *LeaseLeasesResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
-
-func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
- if m != nil {
- return m.Leases
- }
- return nil
-}
-
-type Member struct {
- // ID is the member ID for this member.
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
- Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- // peerURLs is the list of URLs the member exposes to the cluster for communication.
- PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
- // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
- ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
- // isLearner indicates if the member is raft learner.
- IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Member) Reset() { *m = Member{} }
-func (m *Member) String() string { return proto.CompactTextString(m) }
-func (*Member) ProtoMessage() {}
-func (*Member) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{39}
-}
-func (m *Member) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Member.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Member) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Member.Merge(m, src)
-}
-func (m *Member) XXX_Size() int {
- return m.Size()
-}
-func (m *Member) XXX_DiscardUnknown() {
- xxx_messageInfo_Member.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Member proto.InternalMessageInfo
-
-func (m *Member) GetID() uint64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *Member) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Member) GetPeerURLs() []string {
- if m != nil {
- return m.PeerURLs
- }
- return nil
-}
-
-func (m *Member) GetClientURLs() []string {
- if m != nil {
- return m.ClientURLs
- }
- return nil
-}
-
-func (m *Member) GetIsLearner() bool {
- if m != nil {
- return m.IsLearner
- }
- return false
-}
-
-type MemberAddRequest struct {
- // peerURLs is the list of URLs the added member will use to communicate with the cluster.
- PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
- // isLearner indicates if the added member is raft learner.
- IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} }
-func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberAddRequest) ProtoMessage() {}
-func (*MemberAddRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{40}
-}
-func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberAddRequest.Merge(m, src)
-}
-func (m *MemberAddRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberAddRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
-
-func (m *MemberAddRequest) GetPeerURLs() []string {
- if m != nil {
- return m.PeerURLs
- }
- return nil
-}
-
-func (m *MemberAddRequest) GetIsLearner() bool {
- if m != nil {
- return m.IsLearner
- }
- return false
-}
-
-type MemberAddResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // member is the member information for the added member.
- Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
- // members is a list of all members after adding the new member.
- Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} }
-func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberAddResponse) ProtoMessage() {}
-func (*MemberAddResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{41}
-}
-func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberAddResponse.Merge(m, src)
-}
-func (m *MemberAddResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberAddResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
-
-func (m *MemberAddResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberAddResponse) GetMember() *Member {
- if m != nil {
- return m.Member
- }
- return nil
-}
-
-func (m *MemberAddResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type MemberRemoveRequest struct {
- // ID is the member ID of the member to remove.
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} }
-func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveRequest) ProtoMessage() {}
-func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{42}
-}
-func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
-}
-func (m *MemberRemoveRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
-
-func (m *MemberRemoveRequest) GetID() uint64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type MemberRemoveResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // members is a list of all members after removing the member.
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} }
-func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveResponse) ProtoMessage() {}
-func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{43}
-}
-func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
-}
-func (m *MemberRemoveResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
-
-func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberRemoveResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type MemberUpdateRequest struct {
- // ID is the member ID of the member to update.
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // peerURLs is the new list of URLs the member will use to communicate with the cluster.
- PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} }
-func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateRequest) ProtoMessage() {}
-func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{44}
-}
-func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
-}
-func (m *MemberUpdateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
-
-func (m *MemberUpdateRequest) GetID() uint64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *MemberUpdateRequest) GetPeerURLs() []string {
- if m != nil {
- return m.PeerURLs
- }
- return nil
-}
-
-type MemberUpdateResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // members is a list of all members after updating the member.
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} }
-func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateResponse) ProtoMessage() {}
-func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{45}
-}
-func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
-}
-func (m *MemberUpdateResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
-
-func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberUpdateResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type MemberListRequest struct {
- Linearizable bool `protobuf:"varint,1,opt,name=linearizable,proto3" json:"linearizable,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberListRequest) Reset() { *m = MemberListRequest{} }
-func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberListRequest) ProtoMessage() {}
-func (*MemberListRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{46}
-}
-func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberListRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberListRequest.Merge(m, src)
-}
-func (m *MemberListRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberListRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
-
-func (m *MemberListRequest) GetLinearizable() bool {
- if m != nil {
- return m.Linearizable
- }
- return false
-}
-
-type MemberListResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // members is a list of all members associated with the cluster.
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberListResponse) Reset() { *m = MemberListResponse{} }
-func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberListResponse) ProtoMessage() {}
-func (*MemberListResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{47}
-}
-func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberListResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberListResponse.Merge(m, src)
-}
-func (m *MemberListResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberListResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
-
-func (m *MemberListResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberListResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type MemberPromoteRequest struct {
- // ID is the member ID of the member to promote.
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} }
-func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberPromoteRequest) ProtoMessage() {}
-func (*MemberPromoteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{48}
-}
-func (m *MemberPromoteRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberPromoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberPromoteRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberPromoteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberPromoteRequest.Merge(m, src)
-}
-func (m *MemberPromoteRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberPromoteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberPromoteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberPromoteRequest proto.InternalMessageInfo
-
-func (m *MemberPromoteRequest) GetID() uint64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type MemberPromoteResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // members is a list of all members after promoting the member.
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} }
-func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberPromoteResponse) ProtoMessage() {}
-func (*MemberPromoteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{49}
-}
-func (m *MemberPromoteResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberPromoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberPromoteResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberPromoteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberPromoteResponse.Merge(m, src)
-}
-func (m *MemberPromoteResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberPromoteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberPromoteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberPromoteResponse proto.InternalMessageInfo
-
-func (m *MemberPromoteResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberPromoteResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type DefragmentRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} }
-func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
-func (*DefragmentRequest) ProtoMessage() {}
-func (*DefragmentRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{50}
-}
-func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DefragmentRequest.Merge(m, src)
-}
-func (m *DefragmentRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DefragmentRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
-
-type DefragmentResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} }
-func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
-func (*DefragmentResponse) ProtoMessage() {}
-func (*DefragmentResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{51}
-}
-func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DefragmentResponse.Merge(m, src)
-}
-func (m *DefragmentResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *DefragmentResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
-
-func (m *DefragmentResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type MoveLeaderRequest struct {
- // targetID is the node ID for the new leader.
- TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} }
-func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderRequest) ProtoMessage() {}
-func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{52}
-}
-func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
-}
-func (m *MoveLeaderRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
-
-func (m *MoveLeaderRequest) GetTargetID() uint64 {
- if m != nil {
- return m.TargetID
- }
- return 0
-}
-
-type MoveLeaderResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} }
-func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderResponse) ProtoMessage() {}
-func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{53}
-}
-func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
-}
-func (m *MoveLeaderResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
-
-func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AlarmRequest struct {
- // action is the kind of alarm request to issue. The action
- // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
- // raised alarm.
- Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
- // memberID is the ID of the member associated with the alarm. If memberID is 0, the
- // alarm request covers all members.
- MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
- // alarm is the type of alarm to consider for this request.
- Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AlarmRequest) Reset() { *m = AlarmRequest{} }
-func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
-func (*AlarmRequest) ProtoMessage() {}
-func (*AlarmRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{54}
-}
-func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AlarmRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AlarmRequest.Merge(m, src)
-}
-func (m *AlarmRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AlarmRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
-
-func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
- if m != nil {
- return m.Action
- }
- return AlarmRequest_GET
-}
-
-func (m *AlarmRequest) GetMemberID() uint64 {
- if m != nil {
- return m.MemberID
- }
- return 0
-}
-
-func (m *AlarmRequest) GetAlarm() AlarmType {
- if m != nil {
- return m.Alarm
- }
- return AlarmType_NONE
-}
-
-type AlarmMember struct {
- // memberID is the ID of the member associated with the raised alarm.
- MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
- // alarm is the type of alarm which has been raised.
- Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AlarmMember) Reset() { *m = AlarmMember{} }
-func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
-func (*AlarmMember) ProtoMessage() {}
-func (*AlarmMember) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{55}
-}
-func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AlarmMember) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AlarmMember.Merge(m, src)
-}
-func (m *AlarmMember) XXX_Size() int {
- return m.Size()
-}
-func (m *AlarmMember) XXX_DiscardUnknown() {
- xxx_messageInfo_AlarmMember.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
-
-func (m *AlarmMember) GetMemberID() uint64 {
- if m != nil {
- return m.MemberID
- }
- return 0
-}
-
-func (m *AlarmMember) GetAlarm() AlarmType {
- if m != nil {
- return m.Alarm
- }
- return AlarmType_NONE
-}
-
-type AlarmResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // alarms is a list of alarms associated with the alarm request.
- Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AlarmResponse) Reset() { *m = AlarmResponse{} }
-func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
-func (*AlarmResponse) ProtoMessage() {}
-func (*AlarmResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{56}
-}
-func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AlarmResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AlarmResponse.Merge(m, src)
-}
-func (m *AlarmResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AlarmResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
-
-func (m *AlarmResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AlarmResponse) GetAlarms() []*AlarmMember {
- if m != nil {
- return m.Alarms
- }
- return nil
-}
-
-type DowngradeRequest struct {
- // action is the kind of downgrade request to issue. The action may
- // VALIDATE the target version, DOWNGRADE the cluster version,
- // or CANCEL the current downgrading job.
- Action DowngradeRequest_DowngradeAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.DowngradeRequest_DowngradeAction" json:"action,omitempty"`
- // version is the target version to downgrade.
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DowngradeRequest) Reset() { *m = DowngradeRequest{} }
-func (m *DowngradeRequest) String() string { return proto.CompactTextString(m) }
-func (*DowngradeRequest) ProtoMessage() {}
-func (*DowngradeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{57}
-}
-func (m *DowngradeRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DowngradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DowngradeRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DowngradeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DowngradeRequest.Merge(m, src)
-}
-func (m *DowngradeRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DowngradeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DowngradeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DowngradeRequest proto.InternalMessageInfo
-
-func (m *DowngradeRequest) GetAction() DowngradeRequest_DowngradeAction {
- if m != nil {
- return m.Action
- }
- return DowngradeRequest_VALIDATE
-}
-
-func (m *DowngradeRequest) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-type DowngradeResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // version is the current cluster version.
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DowngradeResponse) Reset() { *m = DowngradeResponse{} }
-func (m *DowngradeResponse) String() string { return proto.CompactTextString(m) }
-func (*DowngradeResponse) ProtoMessage() {}
-func (*DowngradeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{58}
-}
-func (m *DowngradeResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DowngradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DowngradeResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DowngradeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DowngradeResponse.Merge(m, src)
-}
-func (m *DowngradeResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *DowngradeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DowngradeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DowngradeResponse proto.InternalMessageInfo
-
-func (m *DowngradeResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *DowngradeResponse) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-type StatusRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StatusRequest) Reset() { *m = StatusRequest{} }
-func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
-func (*StatusRequest) ProtoMessage() {}
-func (*StatusRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{59}
-}
-func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *StatusRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusRequest.Merge(m, src)
-}
-func (m *StatusRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
-
-type StatusResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // version is the cluster protocol version used by the responding member.
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
- // dbSize is the size of the backend database physically allocated, in bytes, of the responding member.
- DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
- // leader is the member ID which the responding member believes is the current leader.
- Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
- // raftIndex is the current raft committed index of the responding member.
- RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
- // raftTerm is the current raft term of the responding member.
- RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
- // raftAppliedIndex is the current raft applied index of the responding member.
- RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"`
- // errors contains alarm/health information and status.
- Errors []string `protobuf:"bytes,8,rep,name=errors,proto3" json:"errors,omitempty"`
- // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
- DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"`
- // isLearner indicates if the member is raft learner.
- IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
- // storageVersion is the version of the db file. It might be updated with some delay relative to the target cluster version.
- StorageVersion string `protobuf:"bytes,11,opt,name=storageVersion,proto3" json:"storageVersion,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StatusResponse) Reset() { *m = StatusResponse{} }
-func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
-func (*StatusResponse) ProtoMessage() {}
-func (*StatusResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{60}
-}
-func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *StatusResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusResponse.Merge(m, src)
-}
-func (m *StatusResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
-
-func (m *StatusResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *StatusResponse) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-func (m *StatusResponse) GetDbSize() int64 {
- if m != nil {
- return m.DbSize
- }
- return 0
-}
-
-func (m *StatusResponse) GetLeader() uint64 {
- if m != nil {
- return m.Leader
- }
- return 0
-}
-
-func (m *StatusResponse) GetRaftIndex() uint64 {
- if m != nil {
- return m.RaftIndex
- }
- return 0
-}
-
-func (m *StatusResponse) GetRaftTerm() uint64 {
- if m != nil {
- return m.RaftTerm
- }
- return 0
-}
-
-func (m *StatusResponse) GetRaftAppliedIndex() uint64 {
- if m != nil {
- return m.RaftAppliedIndex
- }
- return 0
-}
-
-func (m *StatusResponse) GetErrors() []string {
- if m != nil {
- return m.Errors
- }
- return nil
-}
-
-func (m *StatusResponse) GetDbSizeInUse() int64 {
- if m != nil {
- return m.DbSizeInUse
- }
- return 0
-}
-
-func (m *StatusResponse) GetIsLearner() bool {
- if m != nil {
- return m.IsLearner
- }
- return false
-}
-
-func (m *StatusResponse) GetStorageVersion() string {
- if m != nil {
- return m.StorageVersion
- }
- return ""
-}
-
-type AuthEnableRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} }
-func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableRequest) ProtoMessage() {}
-func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{61}
-}
-func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthEnableRequest.Merge(m, src)
-}
-func (m *AuthEnableRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthEnableRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
-
-type AuthDisableRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} }
-func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableRequest) ProtoMessage() {}
-func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{62}
-}
-func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthDisableRequest.Merge(m, src)
-}
-func (m *AuthDisableRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthDisableRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
-
-type AuthStatusRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthStatusRequest) Reset() { *m = AuthStatusRequest{} }
-func (m *AuthStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthStatusRequest) ProtoMessage() {}
-func (*AuthStatusRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{63}
-}
-func (m *AuthStatusRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthStatusRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthStatusRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthStatusRequest.Merge(m, src)
-}
-func (m *AuthStatusRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthStatusRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthStatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthStatusRequest proto.InternalMessageInfo
-
-type AuthenticateRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} }
-func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateRequest) ProtoMessage() {}
-func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{64}
-}
-func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthenticateRequest.Merge(m, src)
-}
-func (m *AuthenticateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthenticateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
-
-func (m *AuthenticateRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthenticateRequest) GetPassword() string {
- if m != nil {
- return m.Password
- }
- return ""
-}
-
-type AuthUserAddRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
- HashedPassword string `protobuf:"bytes,4,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} }
-func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddRequest) ProtoMessage() {}
-func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{65}
-}
-func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
-}
-func (m *AuthUserAddRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
-
-func (m *AuthUserAddRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthUserAddRequest) GetPassword() string {
- if m != nil {
- return m.Password
- }
- return ""
-}
-
-func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions {
- if m != nil {
- return m.Options
- }
- return nil
-}
-
-func (m *AuthUserAddRequest) GetHashedPassword() string {
- if m != nil {
- return m.HashedPassword
- }
- return ""
-}
-
-type AuthUserGetRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} }
-func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetRequest) ProtoMessage() {}
-func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{66}
-}
-func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
-}
-func (m *AuthUserGetRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
-
-func (m *AuthUserGetRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-type AuthUserDeleteRequest struct {
- // name is the name of the user to delete.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} }
-func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteRequest) ProtoMessage() {}
-func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{67}
-}
-func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
-}
-func (m *AuthUserDeleteRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthUserDeleteRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-type AuthUserChangePasswordRequest struct {
- // name is the name of the user whose password is being changed.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // password is the new password for the user. Note that this field will be removed in the API layer.
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
- HashedPassword string `protobuf:"bytes,3,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} }
-func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordRequest) ProtoMessage() {}
-func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{68}
-}
-func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthUserChangePasswordRequest) GetPassword() string {
- if m != nil {
- return m.Password
- }
- return ""
-}
-
-func (m *AuthUserChangePasswordRequest) GetHashedPassword() string {
- if m != nil {
- return m.HashedPassword
- }
- return ""
-}
-
-type AuthUserGrantRoleRequest struct {
- // user is the name of the user which should be granted a given role.
- User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
- // role is the name of the role to grant to the user.
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} }
-func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleRequest) ProtoMessage() {}
-func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{69}
-}
-func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleRequest) GetUser() string {
- if m != nil {
- return m.User
- }
- return ""
-}
-
-func (m *AuthUserGrantRoleRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthUserRevokeRoleRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} }
-func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleRequest) ProtoMessage() {}
-func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{70}
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthUserRevokeRoleRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthRoleAddRequest struct {
- // name is the name of the role to add to the authentication system.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} }
-func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddRequest) ProtoMessage() {}
-func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{71}
-}
-func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
-}
-func (m *AuthRoleAddRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
-
-func (m *AuthRoleAddRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-type AuthRoleGetRequest struct {
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} }
-func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetRequest) ProtoMessage() {}
-func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{72}
-}
-func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
-}
-func (m *AuthRoleGetRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGetRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthUserListRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} }
-func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListRequest) ProtoMessage() {}
-func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{73}
-}
-func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserListRequest.Merge(m, src)
-}
-func (m *AuthUserListRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserListRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
-
-type AuthRoleListRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} }
-func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListRequest) ProtoMessage() {}
-func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{74}
-}
-func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
-}
-func (m *AuthRoleListRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
-
-type AuthRoleDeleteRequest struct {
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} }
-func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteRequest) ProtoMessage() {}
-func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{75}
-}
-func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
-}
-func (m *AuthRoleDeleteRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthRoleGrantPermissionRequest struct {
- // name is the name of the role which will be granted the permission.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // perm is the permission to grant to the role.
- Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} }
-func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionRequest) ProtoMessage() {}
-func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{76}
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
- if m != nil {
- return m.Perm
- }
- return nil
-}
-
-type AuthRoleRevokePermissionRequest struct {
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
- Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} }
-func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionRequest) ProtoMessage() {}
-func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{77}
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-type AuthEnableResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} }
-func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableResponse) ProtoMessage() {}
-func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{78}
-}
-func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthEnableResponse.Merge(m, src)
-}
-func (m *AuthEnableResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthEnableResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
-
-func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthDisableResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} }
-func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableResponse) ProtoMessage() {}
-func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{79}
-}
-func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthDisableResponse.Merge(m, src)
-}
-func (m *AuthDisableResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthDisableResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
-
-func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthStatusResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"`
-	// authRevision is the current revision of the auth store
- AuthRevision uint64 `protobuf:"varint,3,opt,name=authRevision,proto3" json:"authRevision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthStatusResponse) Reset() { *m = AuthStatusResponse{} }
-func (m *AuthStatusResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthStatusResponse) ProtoMessage() {}
-func (*AuthStatusResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{80}
-}
-func (m *AuthStatusResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthStatusResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthStatusResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthStatusResponse.Merge(m, src)
-}
-func (m *AuthStatusResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthStatusResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthStatusResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthStatusResponse proto.InternalMessageInfo
-
-func (m *AuthStatusResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthStatusResponse) GetEnabled() bool {
- if m != nil {
- return m.Enabled
- }
- return false
-}
-
-func (m *AuthStatusResponse) GetAuthRevision() uint64 {
- if m != nil {
- return m.AuthRevision
- }
- return 0
-}
-
-type AuthenticateResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // token is an authorized token that can be used in succeeding RPCs
- Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} }
-func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateResponse) ProtoMessage() {}
-func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{81}
-}
-func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthenticateResponse.Merge(m, src)
-}
-func (m *AuthenticateResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthenticateResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
-
-func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthenticateResponse) GetToken() string {
- if m != nil {
- return m.Token
- }
- return ""
-}
-
-type AuthUserAddResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} }
-func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddResponse) ProtoMessage() {}
-func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{82}
-}
-func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
-}
-func (m *AuthUserAddResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
-
-func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserGetResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} }
-func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetResponse) ProtoMessage() {}
-func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{83}
-}
-func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
-}
-func (m *AuthUserGetResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
-
-func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthUserGetResponse) GetRoles() []string {
- if m != nil {
- return m.Roles
- }
- return nil
-}
-
-type AuthUserDeleteResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} }
-func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteResponse) ProtoMessage() {}
-func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{84}
-}
-func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
-}
-func (m *AuthUserDeleteResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserChangePasswordResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} }
-func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordResponse) ProtoMessage() {}
-func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{85}
-}
-func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserGrantRoleResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} }
-func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleResponse) ProtoMessage() {}
-func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{86}
-}
-func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserRevokeRoleResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} }
-func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleResponse) ProtoMessage() {}
-func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{87}
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleAddResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} }
-func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddResponse) ProtoMessage() {}
-func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{88}
-}
-func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
-}
-func (m *AuthRoleAddResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
-
-func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleGetResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} }
-func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetResponse) ProtoMessage() {}
-func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{89}
-}
-func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
-}
-func (m *AuthRoleGetResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
- if m != nil {
- return m.Perm
- }
- return nil
-}
-
-type AuthRoleListResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} }
-func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListResponse) ProtoMessage() {}
-func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{90}
-}
-func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
-}
-func (m *AuthRoleListResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
-
-func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthRoleListResponse) GetRoles() []string {
- if m != nil {
- return m.Roles
- }
- return nil
-}
-
-type AuthUserListResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} }
-func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListResponse) ProtoMessage() {}
-func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{91}
-}
-func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserListResponse.Merge(m, src)
-}
-func (m *AuthUserListResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserListResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
-
-func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthUserListResponse) GetUsers() []string {
- if m != nil {
- return m.Users
- }
- return nil
-}
-
-type AuthRoleDeleteResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} }
-func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteResponse) ProtoMessage() {}
-func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{92}
-}
-func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
-}
-func (m *AuthRoleDeleteResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleGrantPermissionResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} }
-func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionResponse) ProtoMessage() {}
-func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{93}
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleRevokePermissionResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} }
-func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionResponse) ProtoMessage() {}
-func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{94}
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
- proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
- proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
- proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
- proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
- proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
- proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
- proto.RegisterEnum("etcdserverpb.DowngradeRequest_DowngradeAction", DowngradeRequest_DowngradeAction_name, DowngradeRequest_DowngradeAction_value)
- proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
- proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
- proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
- proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
- proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
- proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
- proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
- proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
- proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
- proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
- proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
- proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
- proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
- proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
- proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
- proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
- proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
- proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
- proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
- proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
- proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
- proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
- proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
- proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
- proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
- proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
- proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
- proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
- proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
- proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint")
- proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest")
- proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse")
- proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
- proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
- proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
- proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
- proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
- proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
- proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
- proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
- proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
- proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
- proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
- proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
- proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
- proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
- proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
- proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
- proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest")
- proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse")
- proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
- proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
- proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
- proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
- proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
- proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
- proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
- proto.RegisterType((*DowngradeRequest)(nil), "etcdserverpb.DowngradeRequest")
- proto.RegisterType((*DowngradeResponse)(nil), "etcdserverpb.DowngradeResponse")
- proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
- proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
- proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
- proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
- proto.RegisterType((*AuthStatusRequest)(nil), "etcdserverpb.AuthStatusRequest")
- proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
- proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
- proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
- proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
- proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
- proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
- proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
- proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
- proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
- proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
- proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
- proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
- proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
- proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
- proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
- proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
- proto.RegisterType((*AuthStatusResponse)(nil), "etcdserverpb.AuthStatusResponse")
- proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
- proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
- proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
- proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
- proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
- proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
- proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
- proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
- proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
- proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
- proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
- proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
- proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
- proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
-}
-
-func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
-
-var fileDescriptor_77a6da22d6a3feb1 = []byte{
- // 4424 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x3c, 0xdf, 0x6f, 0x1c, 0x49,
- 0x5a, 0xee, 0x19, 0xcf, 0x8c, 0xe7, 0x9b, 0xf1, 0x78, 0x5c, 0x71, 0xb2, 0x93, 0xd9, 0xc4, 0xf1,
- 0x76, 0x36, 0xbb, 0xd9, 0xec, 0xae, 0x9d, 0xd8, 0xc9, 0x2d, 0x04, 0xed, 0x72, 0x13, 0x7b, 0x36,
- 0x31, 0x71, 0x6c, 0x5f, 0x7b, 0x92, 0xbd, 0x5d, 0xa4, 0x1b, 0xda, 0x33, 0x15, 0xbb, 0xcf, 0x33,
- 0xdd, 0x73, 0xdd, 0x3d, 0x8e, 0x7d, 0x3c, 0xdc, 0x71, 0x70, 0x9c, 0x0e, 0xa4, 0x93, 0x38, 0x24,
- 0x74, 0x42, 0xf0, 0x82, 0x90, 0xe0, 0xe1, 0x40, 0xf0, 0xc0, 0x03, 0x02, 0x89, 0x07, 0x78, 0x80,
- 0x07, 0x24, 0x24, 0xfe, 0x01, 0x58, 0xee, 0x89, 0x3f, 0x02, 0xa1, 0xfa, 0xd5, 0x55, 0xdd, 0x5d,
- 0x6d, 0x7b, 0xcf, 0x5e, 0xdd, 0xcb, 0x66, 0xba, 0xea, 0xfb, 0x55, 0xdf, 0x57, 0xdf, 0xf7, 0x55,
- 0x7d, 0x5f, 0x79, 0xa1, 0xec, 0x8f, 0x7a, 0x8b, 0x23, 0xdf, 0x0b, 0x3d, 0x54, 0xc5, 0x61, 0xaf,
- 0x1f, 0x60, 0xff, 0x10, 0xfb, 0xa3, 0xdd, 0xe6, 0xdc, 0x9e, 0xb7, 0xe7, 0xd1, 0x89, 0x25, 0xf2,
- 0x8b, 0xc1, 0x34, 0x1b, 0x04, 0x66, 0xc9, 0x1e, 0x39, 0x4b, 0xc3, 0xc3, 0x5e, 0x6f, 0xb4, 0xbb,
- 0x74, 0x70, 0xc8, 0x67, 0x9a, 0xd1, 0x8c, 0x3d, 0x0e, 0xf7, 0x47, 0xbb, 0xf4, 0x1f, 0x3e, 0xb7,
- 0x10, 0xcd, 0x1d, 0x62, 0x3f, 0x70, 0x3c, 0x77, 0xb4, 0x2b, 0x7e, 0x71, 0x88, 0x6b, 0x7b, 0x9e,
- 0xb7, 0x37, 0xc0, 0x0c, 0xdf, 0x75, 0xbd, 0xd0, 0x0e, 0x1d, 0xcf, 0x0d, 0xd8, 0xac, 0xf9, 0x23,
- 0x03, 0x6a, 0x16, 0x0e, 0x46, 0x9e, 0x1b, 0xe0, 0x27, 0xd8, 0xee, 0x63, 0x1f, 0x5d, 0x07, 0xe8,
- 0x0d, 0xc6, 0x41, 0x88, 0xfd, 0xae, 0xd3, 0x6f, 0x18, 0x0b, 0xc6, 0xed, 0x49, 0xab, 0xcc, 0x47,
- 0xd6, 0xfb, 0xe8, 0x75, 0x28, 0x0f, 0xf1, 0x70, 0x97, 0xcd, 0xe6, 0xe8, 0xec, 0x14, 0x1b, 0x58,
- 0xef, 0xa3, 0x26, 0x4c, 0xf9, 0xf8, 0xd0, 0x21, 0xec, 0x1b, 0xf9, 0x05, 0xe3, 0x76, 0xde, 0x8a,
- 0xbe, 0x09, 0xa2, 0x6f, 0xbf, 0x0c, 0xbb, 0x21, 0xf6, 0x87, 0x8d, 0x49, 0x86, 0x48, 0x06, 0x3a,
- 0xd8, 0x1f, 0x3e, 0x2c, 0x7d, 0xef, 0xef, 0x1a, 0xf9, 0x95, 0xc5, 0xbb, 0xe6, 0x3f, 0x17, 0xa0,
- 0x6a, 0xd9, 0xee, 0x1e, 0xb6, 0xf0, 0xb7, 0xc6, 0x38, 0x08, 0x51, 0x1d, 0xf2, 0x07, 0xf8, 0x98,
- 0xca, 0x51, 0xb5, 0xc8, 0x4f, 0x46, 0xc8, 0xdd, 0xc3, 0x5d, 0xec, 0x32, 0x09, 0xaa, 0x84, 0x90,
- 0xbb, 0x87, 0xdb, 0x6e, 0x1f, 0xcd, 0x41, 0x61, 0xe0, 0x0c, 0x9d, 0x90, 0xb3, 0x67, 0x1f, 0x31,
- 0xb9, 0x26, 0x13, 0x72, 0xad, 0x02, 0x04, 0x9e, 0x1f, 0x76, 0x3d, 0xbf, 0x8f, 0xfd, 0x46, 0x61,
- 0xc1, 0xb8, 0x5d, 0x5b, 0x7e, 0x73, 0x51, 0xb5, 0xd8, 0xa2, 0x2a, 0xd0, 0xe2, 0x8e, 0xe7, 0x87,
- 0x5b, 0x04, 0xd6, 0x2a, 0x07, 0xe2, 0x27, 0xfa, 0x18, 0x2a, 0x94, 0x48, 0x68, 0xfb, 0x7b, 0x38,
- 0x6c, 0x14, 0x29, 0x95, 0x5b, 0xa7, 0x50, 0xe9, 0x50, 0x60, 0x8b, 0xb2, 0x67, 0xbf, 0x91, 0x09,
- 0xd5, 0x00, 0xfb, 0x8e, 0x3d, 0x70, 0xbe, 0x6d, 0xef, 0x0e, 0x70, 0xa3, 0xb4, 0x60, 0xdc, 0x9e,
- 0xb2, 0x62, 0x63, 0x64, 0xfd, 0x07, 0xf8, 0x38, 0xe8, 0x7a, 0xee, 0xe0, 0xb8, 0x31, 0x45, 0x01,
- 0xa6, 0xc8, 0xc0, 0x96, 0x3b, 0x38, 0xa6, 0xd6, 0xf3, 0xc6, 0x6e, 0xc8, 0x66, 0xcb, 0x74, 0xb6,
- 0x4c, 0x47, 0xe8, 0xf4, 0x3d, 0xa8, 0x0f, 0x1d, 0xb7, 0x3b, 0xf4, 0xfa, 0xdd, 0x48, 0x21, 0x40,
- 0x14, 0xf2, 0xa8, 0xf4, 0x7b, 0xd4, 0x02, 0xf7, 0xac, 0xda, 0xd0, 0x71, 0x9f, 0x79, 0x7d, 0x4b,
- 0xe8, 0x87, 0xa0, 0xd8, 0x47, 0x71, 0x94, 0x4a, 0x12, 0xc5, 0x3e, 0x52, 0x51, 0x3e, 0x80, 0x4b,
- 0x84, 0x4b, 0xcf, 0xc7, 0x76, 0x88, 0x25, 0x56, 0x35, 0x8e, 0x35, 0x3b, 0x74, 0xdc, 0x55, 0x0a,
- 0x12, 0x43, 0xb4, 0x8f, 0x52, 0x88, 0xd3, 0x49, 0x44, 0xfb, 0x28, 0x8e, 0x68, 0x7e, 0x00, 0xe5,
- 0xc8, 0x2e, 0x68, 0x0a, 0x26, 0x37, 0xb7, 0x36, 0xdb, 0xf5, 0x09, 0x04, 0x50, 0x6c, 0xed, 0xac,
- 0xb6, 0x37, 0xd7, 0xea, 0x06, 0xaa, 0x40, 0x69, 0xad, 0xcd, 0x3e, 0x72, 0xcd, 0xd2, 0x8f, 0xf9,
- 0x7e, 0x7b, 0x0a, 0x20, 0x4d, 0x81, 0x4a, 0x90, 0x7f, 0xda, 0xfe, 0xb4, 0x3e, 0x41, 0x80, 0x5f,
- 0xb4, 0xad, 0x9d, 0xf5, 0xad, 0xcd, 0xba, 0x41, 0xa8, 0xac, 0x5a, 0xed, 0x56, 0xa7, 0x5d, 0xcf,
- 0x11, 0x88, 0x67, 0x5b, 0x6b, 0xf5, 0x3c, 0x2a, 0x43, 0xe1, 0x45, 0x6b, 0xe3, 0x79, 0xbb, 0x3e,
- 0x19, 0x11, 0x93, 0xbb, 0xf8, 0x4f, 0x0c, 0x98, 0xe6, 0xe6, 0x66, 0xbe, 0x85, 0xee, 0x43, 0x71,
- 0x9f, 0xfa, 0x17, 0xdd, 0xc9, 0x95, 0xe5, 0x6b, 0x89, 0xbd, 0x11, 0xf3, 0x41, 0x8b, 0xc3, 0x22,
- 0x13, 0xf2, 0x07, 0x87, 0x41, 0x23, 0xb7, 0x90, 0xbf, 0x5d, 0x59, 0xae, 0x2f, 0xb2, 0xc8, 0xb0,
- 0xf8, 0x14, 0x1f, 0xbf, 0xb0, 0x07, 0x63, 0x6c, 0x91, 0x49, 0x84, 0x60, 0x72, 0xe8, 0xf9, 0x98,
- 0x6e, 0xf8, 0x29, 0x8b, 0xfe, 0x26, 0x5e, 0x40, 0x6d, 0xce, 0x37, 0x3b, 0xfb, 0x90, 0xe2, 0xfd,
- 0xbb, 0x01, 0xb0, 0x3d, 0x0e, 0xb3, 0x5d, 0x6c, 0x0e, 0x0a, 0x87, 0x84, 0x03, 0x77, 0x2f, 0xf6,
- 0x41, 0x7d, 0x0b, 0xdb, 0x01, 0x8e, 0x7c, 0x8b, 0x7c, 0xa0, 0x05, 0x28, 0x8d, 0x7c, 0x7c, 0xd8,
- 0x3d, 0x38, 0xa4, 0xdc, 0xa6, 0xa4, 0x9d, 0x8a, 0x64, 0xfc, 0xe9, 0x21, 0xba, 0x03, 0x55, 0x67,
- 0xcf, 0xf5, 0x7c, 0xdc, 0x65, 0x44, 0x0b, 0x2a, 0xd8, 0xb2, 0x55, 0x61, 0x93, 0x74, 0x49, 0x0a,
- 0x2c, 0x63, 0x55, 0xd4, 0xc2, 0x6e, 0x90, 0x39, 0xb9, 0x9e, 0xef, 0x1a, 0x50, 0xa1, 0xeb, 0x39,
- 0x97, 0xb2, 0x97, 0xe5, 0x42, 0x72, 0x14, 0x2d, 0xa5, 0xf0, 0xd4, 0xd2, 0xa4, 0x08, 0x2e, 0xa0,
- 0x35, 0x3c, 0xc0, 0x21, 0x3e, 0x4f, 0xf0, 0x52, 0x54, 0x99, 0xd7, 0xaa, 0x52, 0xf2, 0xfb, 0x73,
- 0x03, 0x2e, 0xc5, 0x18, 0x9e, 0x6b, 0xe9, 0x0d, 0x28, 0xf5, 0x29, 0x31, 0x26, 0x53, 0xde, 0x12,
- 0x9f, 0xe8, 0x3e, 0x4c, 0x71, 0x91, 0x82, 0x46, 0x5e, 0xbf, 0x0d, 0xa5, 0x94, 0x25, 0x26, 0x65,
- 0x20, 0xc5, 0xfc, 0x87, 0x1c, 0x94, 0xb9, 0x32, 0xb6, 0x46, 0xa8, 0x05, 0xd3, 0x3e, 0xfb, 0xe8,
- 0xd2, 0x35, 0x73, 0x19, 0x9b, 0xd9, 0x71, 0xf2, 0xc9, 0x84, 0x55, 0xe5, 0x28, 0x74, 0x18, 0xfd,
- 0x0a, 0x54, 0x04, 0x89, 0xd1, 0x38, 0xe4, 0x86, 0x6a, 0xc4, 0x09, 0xc8, 0xad, 0xfd, 0x64, 0xc2,
- 0x02, 0x0e, 0xbe, 0x3d, 0x0e, 0x51, 0x07, 0xe6, 0x04, 0x32, 0x5b, 0x1f, 0x17, 0x23, 0x4f, 0xa9,
- 0x2c, 0xc4, 0xa9, 0xa4, 0xcd, 0xf9, 0x64, 0xc2, 0x42, 0x1c, 0x5f, 0x99, 0x44, 0x6b, 0x52, 0xa4,
- 0xf0, 0x88, 0xe5, 0x97, 0x94, 0x48, 0x9d, 0x23, 0x97, 0x13, 0x11, 0xda, 0x5a, 0x51, 0x64, 0xeb,
- 0x1c, 0xb9, 0x91, 0xca, 0x1e, 0x95, 0xa1, 0xc4, 0x87, 0xcd, 0x7f, 0xcb, 0x01, 0x08, 0x8b, 0x6d,
- 0x8d, 0xd0, 0x1a, 0xd4, 0x7c, 0xfe, 0x15, 0xd3, 0xdf, 0xeb, 0x5a, 0xfd, 0x71, 0x43, 0x4f, 0x58,
- 0xd3, 0x02, 0x89, 0x89, 0xfb, 0x11, 0x54, 0x23, 0x2a, 0x52, 0x85, 0x57, 0x35, 0x2a, 0x8c, 0x28,
- 0x54, 0x04, 0x02, 0x51, 0xe2, 0x27, 0x70, 0x39, 0xc2, 0xd7, 0x68, 0xf1, 0x8d, 0x13, 0xb4, 0x18,
- 0x11, 0xbc, 0x24, 0x28, 0xa8, 0x7a, 0x7c, 0xac, 0x08, 0x26, 0x15, 0x79, 0x55, 0xa3, 0x48, 0x06,
- 0xa4, 0x6a, 0x32, 0x92, 0x30, 0xa6, 0x4a, 0x20, 0x69, 0x9f, 0x8d, 0x9b, 0x7f, 0x39, 0x09, 0xa5,
- 0x55, 0x6f, 0x38, 0xb2, 0x7d, 0xb2, 0x89, 0x8a, 0x3e, 0x0e, 0xc6, 0x83, 0x90, 0x2a, 0xb0, 0xb6,
- 0x7c, 0x33, 0xce, 0x83, 0x83, 0x89, 0x7f, 0x2d, 0x0a, 0x6a, 0x71, 0x14, 0x82, 0xcc, 0xb3, 0x7c,
- 0xee, 0x0c, 0xc8, 0x3c, 0xc7, 0x73, 0x14, 0x11, 0x10, 0xf2, 0x32, 0x20, 0x34, 0xa1, 0xc4, 0x0f,
- 0x6c, 0x2c, 0x58, 0x3f, 0x99, 0xb0, 0xc4, 0x00, 0x7a, 0x07, 0x66, 0x92, 0xa9, 0xb0, 0xc0, 0x61,
- 0x6a, 0xbd, 0x78, 0xe6, 0xbc, 0x09, 0xd5, 0x58, 0x86, 0x2e, 0x72, 0xb8, 0xca, 0x50, 0xc9, 0xcb,
- 0x57, 0x44, 0x58, 0x27, 0xc7, 0x8a, 0xea, 0x93, 0x09, 0x11, 0xd8, 0x6f, 0x88, 0xc0, 0x3e, 0xa5,
- 0x26, 0x5a, 0xa2, 0x57, 0x1e, 0xe3, 0xdf, 0x54, 0xa3, 0xd6, 0x57, 0x09, 0x72, 0x04, 0x24, 0xc3,
- 0x97, 0x69, 0xc1, 0x74, 0x4c, 0x65, 0x24, 0x47, 0xb6, 0xbf, 0xf6, 0xbc, 0xb5, 0xc1, 0x12, 0xea,
- 0x63, 0x9a, 0x43, 0xad, 0xba, 0x41, 0x12, 0xf4, 0x46, 0x7b, 0x67, 0xa7, 0x9e, 0x43, 0x57, 0xa0,
- 0xbc, 0xb9, 0xd5, 0xe9, 0x32, 0xa8, 0x7c, 0xb3, 0xf4, 0xc7, 0x2c, 0x92, 0xc8, 0xfc, 0xfc, 0x69,
- 0x44, 0x93, 0xa7, 0x68, 0x25, 0x33, 0x4f, 0x28, 0x99, 0xd9, 0x10, 0x99, 0x39, 0x27, 0x33, 0x73,
- 0x1e, 0x21, 0x28, 0x6c, 0xb4, 0x5b, 0x3b, 0x34, 0x49, 0x33, 0xd2, 0x2b, 0xe9, 0x6c, 0xfd, 0xa8,
- 0x06, 0x55, 0x66, 0x9e, 0xee, 0xd8, 0x25, 0x87, 0x89, 0x9f, 0x1a, 0x00, 0xd2, 0x61, 0xd1, 0x12,
- 0x94, 0x7a, 0x4c, 0x84, 0x86, 0x41, 0x23, 0xe0, 0x65, 0xad, 0xc5, 0x2d, 0x01, 0x85, 0xee, 0x41,
- 0x29, 0x18, 0xf7, 0x7a, 0x38, 0x10, 0x99, 0xfb, 0xb5, 0x64, 0x10, 0xe6, 0x01, 0xd1, 0x12, 0x70,
- 0x04, 0xe5, 0xa5, 0xed, 0x0c, 0xc6, 0x34, 0x8f, 0x9f, 0x8c, 0xc2, 0xe1, 0x64, 0x8c, 0xfd, 0x33,
- 0x03, 0x2a, 0x8a, 0x5b, 0xfc, 0x9c, 0x29, 0xe0, 0x1a, 0x94, 0xa9, 0x30, 0xb8, 0xcf, 0x93, 0xc0,
- 0x94, 0x25, 0x07, 0xd0, 0x57, 0xa0, 0x2c, 0x3c, 0x49, 0xe4, 0x81, 0x86, 0x9e, 0xec, 0xd6, 0xc8,
- 0x92, 0xa0, 0x52, 0xc8, 0x0e, 0xcc, 0x52, 0x3d, 0xf5, 0xc8, 0xed, 0x43, 0x68, 0x56, 0x3d, 0x96,
- 0x1b, 0x89, 0x63, 0x79, 0x13, 0xa6, 0x46, 0xfb, 0xc7, 0x81, 0xd3, 0xb3, 0x07, 0x5c, 0x9c, 0xe8,
- 0x5b, 0x52, 0xdd, 0x01, 0xa4, 0x52, 0x3d, 0x8f, 0x02, 0x24, 0xd1, 0x2b, 0x50, 0x79, 0x62, 0x07,
- 0xfb, 0x5c, 0x48, 0x39, 0x7e, 0x1f, 0xa6, 0xc9, 0xf8, 0xd3, 0x17, 0x67, 0x10, 0x5f, 0x60, 0xad,
- 0x98, 0xff, 0x68, 0x40, 0x4d, 0xa0, 0x9d, 0xcb, 0x40, 0x08, 0x26, 0xf7, 0xed, 0x60, 0x9f, 0x2a,
- 0x63, 0xda, 0xa2, 0xbf, 0xd1, 0x3b, 0x50, 0xef, 0xb1, 0xf5, 0x77, 0x13, 0xf7, 0xae, 0x19, 0x3e,
- 0x1e, 0xf9, 0xfe, 0x7b, 0x30, 0x4d, 0x50, 0xba, 0xf1, 0x7b, 0x90, 0x70, 0xe3, 0xaf, 0x58, 0xd5,
- 0x7d, 0xba, 0xe6, 0xa4, 0xf8, 0x36, 0x54, 0x99, 0x32, 0x2e, 0x5a, 0x76, 0xa9, 0xd7, 0x26, 0xcc,
- 0xec, 0xb8, 0xf6, 0x28, 0xd8, 0xf7, 0xc2, 0x84, 0xce, 0x57, 0xcc, 0xbf, 0x35, 0xa0, 0x2e, 0x27,
- 0xcf, 0x25, 0xc3, 0xdb, 0x30, 0xe3, 0xe3, 0xa1, 0xed, 0xb8, 0x8e, 0xbb, 0xd7, 0xdd, 0x3d, 0x0e,
- 0x71, 0xc0, 0xaf, 0xaf, 0xb5, 0x68, 0xf8, 0x11, 0x19, 0x25, 0xc2, 0xee, 0x0e, 0xbc, 0x5d, 0x1e,
- 0xa4, 0xe9, 0x6f, 0xf4, 0x46, 0x3c, 0x4a, 0x97, 0xa5, 0xde, 0xc4, 0xb8, 0x94, 0xf9, 0x27, 0x39,
- 0xa8, 0x7e, 0x62, 0x87, 0x3d, 0xb1, 0x83, 0xd0, 0x3a, 0xd4, 0xa2, 0x30, 0x4e, 0x47, 0xb8, 0xdc,
- 0x89, 0x03, 0x07, 0xc5, 0x11, 0xf7, 0x1a, 0x71, 0xe0, 0x98, 0xee, 0xa9, 0x03, 0x94, 0x94, 0xed,
- 0xf6, 0xf0, 0x20, 0x22, 0x95, 0xcb, 0x26, 0x45, 0x01, 0x55, 0x52, 0xea, 0x00, 0xfa, 0x3a, 0xd4,
- 0x47, 0xbe, 0xb7, 0xe7, 0xe3, 0x20, 0x88, 0x88, 0xb1, 0x14, 0x6e, 0x6a, 0x88, 0x6d, 0x73, 0xd0,
- 0xc4, 0x29, 0xe6, 0xfe, 0x93, 0x09, 0x6b, 0x66, 0x14, 0x9f, 0x93, 0x81, 0x75, 0x46, 0x9e, 0xf7,
- 0x58, 0x64, 0xfd, 0x41, 0x1e, 0x50, 0x7a, 0x99, 0x5f, 0xf4, 0x98, 0x7c, 0x0b, 0x6a, 0x41, 0x68,
- 0xfb, 0xa9, 0x3d, 0x3f, 0x4d, 0x47, 0xa3, 0x1d, 0xff, 0x36, 0x44, 0x92, 0x75, 0x5d, 0x2f, 0x74,
- 0x5e, 0x1e, 0xb3, 0x0b, 0x8a, 0x55, 0x13, 0xc3, 0x9b, 0x74, 0x14, 0x6d, 0x42, 0xe9, 0xa5, 0x33,
- 0x08, 0xb1, 0x1f, 0x34, 0x0a, 0x0b, 0xf9, 0xdb, 0xb5, 0xe5, 0x77, 0x4f, 0x33, 0xcc, 0xe2, 0xc7,
- 0x14, 0xbe, 0x73, 0x3c, 0x52, 0x4f, 0xbf, 0x9c, 0x88, 0x7a, 0x8c, 0x2f, 0xea, 0x6f, 0x44, 0x26,
- 0x4c, 0xbd, 0x22, 0x44, 0xbb, 0x4e, 0x9f, 0xe6, 0xe2, 0xc8, 0x0f, 0xef, 0x5b, 0x25, 0x3a, 0xb1,
- 0xde, 0x47, 0x37, 0x61, 0xea, 0xa5, 0x6f, 0xef, 0x0d, 0xb1, 0x1b, 0xb2, 0x5b, 0xbe, 0x84, 0x89,
- 0x26, 0xcc, 0x45, 0x00, 0x29, 0x0a, 0xc9, 0x7c, 0x9b, 0x5b, 0xdb, 0xcf, 0x3b, 0xf5, 0x09, 0x54,
- 0x85, 0xa9, 0xcd, 0xad, 0xb5, 0xf6, 0x46, 0x9b, 0xe4, 0x46, 0x91, 0xf3, 0xee, 0x49, 0xa7, 0x6b,
- 0x09, 0x43, 0xc4, 0xf6, 0x84, 0x2a, 0x97, 0x11, 0xbf, 0x74, 0x0b, 0xb9, 0x04, 0x89, 0x7b, 0xe6,
- 0x0d, 0x98, 0xd3, 0x6d, 0x0d, 0x01, 0x70, 0xdf, 0xfc, 0x97, 0x1c, 0x4c, 0x73, 0x47, 0x38, 0x97,
- 0xe7, 0x5e, 0x55, 0xa4, 0xe2, 0xd7, 0x13, 0xa1, 0xa4, 0x06, 0x94, 0x98, 0x83, 0xf4, 0xf9, 0xfd,
- 0x57, 0x7c, 0x92, 0xe0, 0xcc, 0xf6, 0x3b, 0xee, 0x73, 0xb3, 0x47, 0xdf, 0xda, 0xb0, 0x59, 0xc8,
- 0x0c, 0x9b, 0x91, 0xc3, 0xd9, 0x01, 0x3f, 0x58, 0x95, 0xa5, 0x29, 0xaa, 0xc2, 0xa9, 0xc8, 0x64,
- 0xcc, 0x66, 0xa5, 0x0c, 0x9b, 0xa1, 0x5b, 0x50, 0xc4, 0x87, 0xd8, 0x0d, 0x83, 0x46, 0x85, 0x26,
- 0xd2, 0x69, 0x71, 0xa1, 0x6a, 0x93, 0x51, 0x8b, 0x4f, 0x4a, 0x53, 0x7d, 0x04, 0xb3, 0xf4, 0xbe,
- 0xfb, 0xd8, 0xb7, 0x5d, 0xf5, 0xce, 0xde, 0xe9, 0x6c, 0xf0, 0xb4, 0x43, 0x7e, 0xa2, 0x1a, 0xe4,
- 0xd6, 0xd7, 0xb8, 0x7e, 0x72, 0xeb, 0x6b, 0x12, 0xff, 0xf7, 0x0d, 0x40, 0x2a, 0x81, 0x73, 0xd9,
- 0x22, 0xc1, 0x45, 0xc8, 0x91, 0x97, 0x72, 0xcc, 0x41, 0x01, 0xfb, 0xbe, 0xe7, 0xb3, 0x40, 0x69,
- 0xb1, 0x0f, 0x29, 0xcd, 0xfb, 0x5c, 0x18, 0x0b, 0x1f, 0x7a, 0x07, 0x51, 0x04, 0x60, 0x64, 0x8d,
- 0xb4, 0xf0, 0x1d, 0xb8, 0x14, 0x03, 0xbf, 0x98, 0x14, 0xbf, 0x05, 0x33, 0x94, 0xea, 0xea, 0x3e,
- 0xee, 0x1d, 0x8c, 0x3c, 0xc7, 0x4d, 0x49, 0x80, 0x6e, 0x92, 0xd8, 0x25, 0xd2, 0x05, 0x59, 0x22,
- 0x5b, 0x73, 0x35, 0x1a, 0xec, 0x74, 0x36, 0xe4, 0x56, 0xdf, 0x85, 0x2b, 0x09, 0x82, 0x62, 0x65,
- 0xbf, 0x0a, 0x95, 0x5e, 0x34, 0x18, 0xf0, 0x13, 0xe4, 0xf5, 0xb8, 0xb8, 0x49, 0x54, 0x15, 0x43,
- 0xf2, 0xf8, 0x3a, 0xbc, 0x96, 0xe2, 0x71, 0x11, 0xea, 0xb8, 0x6f, 0xde, 0x85, 0xcb, 0x94, 0xf2,
- 0x53, 0x8c, 0x47, 0xad, 0x81, 0x73, 0x78, 0xba, 0x59, 0x8e, 0xf9, 0x7a, 0x15, 0x8c, 0x2f, 0x77,
- 0x5b, 0x49, 0xd6, 0x6d, 0xce, 0xba, 0xe3, 0x0c, 0x71, 0xc7, 0xdb, 0xc8, 0x96, 0x96, 0x24, 0xf2,
- 0x03, 0x7c, 0x1c, 0xf0, 0xe3, 0x23, 0xfd, 0x2d, 0xa3, 0xd7, 0x5f, 0x1b, 0x5c, 0x9d, 0x2a, 0x9d,
- 0x2f, 0xd9, 0x35, 0xe6, 0x01, 0xf6, 0x88, 0x0f, 0xe2, 0x3e, 0x99, 0x60, 0xb5, 0x39, 0x65, 0x24,
- 0x12, 0x98, 0x64, 0xa1, 0x6a, 0x52, 0xe0, 0xeb, 0xdc, 0x71, 0xe8, 0x7f, 0x82, 0xd4, 0x49, 0xe9,
- 0x2d, 0xa8, 0xd0, 0x99, 0x9d, 0xd0, 0x0e, 0xc7, 0x41, 0x96, 0xe5, 0x56, 0xcc, 0x1f, 0x18, 0xdc,
- 0xa3, 0x04, 0x9d, 0x73, 0xad, 0xf9, 0x1e, 0x14, 0xe9, 0x0d, 0x51, 0xdc, 0x74, 0xae, 0x6a, 0x36,
- 0x36, 0x93, 0xc8, 0xe2, 0x80, 0xca, 0x39, 0xc9, 0x80, 0xe2, 0x33, 0xda, 0x39, 0x50, 0xa4, 0x9d,
- 0x14, 0x96, 0x73, 0xed, 0x21, 0x2b, 0x3f, 0x96, 0x2d, 0xfa, 0x9b, 0x5e, 0x08, 0x30, 0xf6, 0x9f,
- 0x5b, 0x1b, 0xec, 0x06, 0x52, 0xb6, 0xa2, 0x6f, 0xa2, 0xd8, 0xde, 0xc0, 0xc1, 0x6e, 0x48, 0x67,
- 0x27, 0xe9, 0xac, 0x32, 0x82, 0x6e, 0x41, 0xd9, 0x09, 0x36, 0xb0, 0xed, 0xbb, 0xbc, 0xc4, 0xaf,
- 0x04, 0x66, 0x39, 0x23, 0xf7, 0xd8, 0x37, 0xa0, 0xce, 0x24, 0x6b, 0xf5, 0xfb, 0xca, 0x69, 0x3f,
- 0xe2, 0x6f, 0x24, 0xf8, 0xc7, 0xe8, 0xe7, 0x4e, 0xa7, 0xff, 0x37, 0x06, 0xcc, 0x2a, 0x0c, 0xce,
- 0x65, 0x82, 0xf7, 0xa0, 0xc8, 0xfa, 0x2f, 0xfc, 0x28, 0x38, 0x17, 0xc7, 0x62, 0x6c, 0x2c, 0x0e,
- 0x83, 0x16, 0xa1, 0xc4, 0x7e, 0x89, 0x6b, 0x9c, 0x1e, 0x5c, 0x00, 0x49, 0x91, 0x17, 0xe1, 0x12,
- 0x9f, 0xc3, 0x43, 0x4f, 0xe7, 0x73, 0x93, 0xf1, 0x08, 0xf1, 0x7d, 0x03, 0xe6, 0xe2, 0x08, 0xe7,
- 0x5a, 0xa5, 0x22, 0x77, 0xee, 0x0b, 0xc9, 0xfd, 0x6b, 0x42, 0xee, 0xe7, 0xa3, 0xbe, 0x72, 0xe4,
- 0x4c, 0xee, 0x38, 0xd5, 0xba, 0xb9, 0xb8, 0x75, 0x25, 0xad, 0x1f, 0x45, 0x6b, 0x12, 0xc4, 0xce,
- 0xb5, 0xa6, 0x0f, 0xce, 0xb4, 0x26, 0xe5, 0x08, 0x96, 0x5a, 0xdc, 0xba, 0xd8, 0x46, 0x1b, 0x4e,
- 0x10, 0x65, 0x9c, 0x77, 0xa1, 0x3a, 0x70, 0x5c, 0x6c, 0xfb, 0xbc, 0x87, 0x64, 0xa8, 0xfb, 0xf1,
- 0x81, 0x15, 0x9b, 0x94, 0xa4, 0x7e, 0xdb, 0x00, 0xa4, 0xd2, 0xfa, 0xc5, 0x58, 0x6b, 0x49, 0x28,
- 0x78, 0xdb, 0xf7, 0x86, 0x5e, 0x78, 0xda, 0x36, 0xbb, 0x6f, 0xfe, 0xae, 0x01, 0x97, 0x13, 0x18,
- 0xbf, 0x08, 0xc9, 0xef, 0x9b, 0xd7, 0x60, 0x76, 0x0d, 0x8b, 0x33, 0x5e, 0xaa, 0x76, 0xb0, 0x03,
- 0x48, 0x9d, 0xbd, 0x98, 0x53, 0xcc, 0x2f, 0xc1, 0xec, 0x33, 0xef, 0x90, 0x04, 0x72, 0x32, 0x2d,
- 0xc3, 0x14, 0x2b, 0x66, 0x45, 0xfa, 0x8a, 0xbe, 0x65, 0xe8, 0xdd, 0x01, 0xa4, 0x62, 0x5e, 0x84,
- 0x38, 0x2b, 0xe6, 0x7f, 0x1b, 0x50, 0x6d, 0x0d, 0x6c, 0x7f, 0x28, 0x44, 0xf9, 0x08, 0x8a, 0xac,
- 0x32, 0xc3, 0xcb, 0xac, 0x6f, 0xc5, 0xe9, 0xa9, 0xb0, 0xec, 0xa3, 0xc5, 0xea, 0x38, 0x1c, 0x8b,
- 0x2c, 0x85, 0x77, 0x96, 0xd7, 0x12, 0x9d, 0xe6, 0x35, 0xf4, 0x3e, 0x14, 0x6c, 0x82, 0x42, 0xd3,
- 0x6b, 0x2d, 0x59, 0x2e, 0xa3, 0xd4, 0xc8, 0x95, 0xc8, 0x62, 0x50, 0xe6, 0x87, 0x50, 0x51, 0x38,
- 0xa0, 0x12, 0xe4, 0x1f, 0xb7, 0xf9, 0x35, 0xa9, 0xb5, 0xda, 0x59, 0x7f, 0xc1, 0x4a, 0x88, 0x35,
- 0x80, 0xb5, 0x76, 0xf4, 0x9d, 0xd3, 0x34, 0xf6, 0x6c, 0x4e, 0x87, 0xe7, 0x2d, 0x55, 0x42, 0x23,
- 0x4b, 0xc2, 0xdc, 0x59, 0x24, 0x94, 0x2c, 0x7e, 0xcb, 0x80, 0x69, 0xae, 0x9a, 0xf3, 0xa6, 0x66,
- 0x4a, 0x39, 0x23, 0x35, 0x2b, 0xcb, 0xb0, 0x38, 0xa0, 0x94, 0xe1, 0x9f, 0x0c, 0xa8, 0xaf, 0x79,
- 0xaf, 0xdc, 0x3d, 0xdf, 0xee, 0x47, 0x3e, 0xf8, 0x71, 0xc2, 0x9c, 0x8b, 0x89, 0x4a, 0x7f, 0x02,
- 0x5e, 0x0e, 0x24, 0xcc, 0xda, 0x90, 0xb5, 0x14, 0x96, 0xdf, 0xc5, 0xa7, 0xf9, 0x55, 0x98, 0x49,
- 0x20, 0x11, 0x03, 0xbd, 0x68, 0x6d, 0xac, 0xaf, 0x11, 0x83, 0xd0, 0x7a, 0x6f, 0x7b, 0xb3, 0xf5,
- 0x68, 0xa3, 0xcd, 0xbb, 0xb2, 0xad, 0xcd, 0xd5, 0xf6, 0x86, 0x34, 0xd4, 0x03, 0xb1, 0x82, 0x07,
- 0xe6, 0x00, 0x66, 0x15, 0x81, 0xce, 0xdb, 0x1c, 0xd3, 0xcb, 0x2b, 0xb9, 0x35, 0x60, 0x9a, 0x9f,
- 0x72, 0x92, 0x8e, 0xff, 0xd3, 0x3c, 0xd4, 0xc4, 0xd4, 0x97, 0x23, 0x05, 0xba, 0x02, 0xc5, 0xfe,
- 0xee, 0x8e, 0xf3, 0x6d, 0xd1, 0x97, 0xe5, 0x5f, 0x64, 0x7c, 0xc0, 0xf8, 0xb0, 0xd7, 0x16, 0xfc,
- 0x0b, 0x5d, 0x63, 0x0f, 0x31, 0xd6, 0xdd, 0x3e, 0x3e, 0xa2, 0x87, 0xa1, 0x49, 0x4b, 0x0e, 0xd0,
- 0xa2, 0x26, 0x7f, 0x95, 0x41, 0xef, 0xba, 0xca, 0x2b, 0x0d, 0xb4, 0x02, 0x75, 0xf2, 0xbb, 0x35,
- 0x1a, 0x0d, 0x1c, 0xdc, 0x67, 0x04, 0xc8, 0x35, 0x77, 0x52, 0x9e, 0x76, 0x52, 0x00, 0xe8, 0x06,
- 0x14, 0xe9, 0x15, 0x30, 0x68, 0x4c, 0x91, 0xbc, 0x2a, 0x41, 0xf9, 0x30, 0x7a, 0x07, 0x2a, 0x4c,
- 0xe2, 0x75, 0xf7, 0x79, 0x80, 0xe9, 0x9b, 0x05, 0xa5, 0x1e, 0xa2, 0xce, 0xc5, 0xcf, 0x59, 0x90,
- 0x75, 0xce, 0x42, 0x4b, 0x50, 0x0b, 0x42, 0xcf, 0xb7, 0xf7, 0xf0, 0x0b, 0xae, 0xb2, 0x4a, 0xbc,
- 0x68, 0x97, 0x98, 0x96, 0xe6, 0xba, 0x06, 0xb3, 0xad, 0x71, 0xb8, 0xdf, 0x76, 0x49, 0x72, 0x4c,
- 0x19, 0xf3, 0x3a, 0x20, 0x32, 0xbb, 0xe6, 0x04, 0xda, 0x69, 0x8e, 0xac, 0xdd, 0x09, 0x0f, 0xcc,
- 0x4d, 0xb8, 0x44, 0x66, 0xb1, 0x1b, 0x3a, 0x3d, 0xe5, 0x20, 0x22, 0x8e, 0xba, 0x46, 0xe2, 0xa8,
- 0x6b, 0x07, 0xc1, 0x2b, 0xcf, 0xef, 0x73, 0x63, 0x47, 0xdf, 0x92, 0xdb, 0xdf, 0x1b, 0x4c, 0x9a,
- 0xe7, 0x41, 0xec, 0x98, 0xfa, 0x05, 0xe9, 0xa1, 0x5f, 0x86, 0x92, 0x37, 0xa2, 0x4f, 0x82, 0x78,
- 0xf5, 0xef, 0xca, 0x22, 0x7b, 0x66, 0xb4, 0xc8, 0x09, 0x6f, 0xb1, 0x59, 0xa5, 0x42, 0xc5, 0xe1,
- 0x89, 0x9a, 0xf7, 0xed, 0x60, 0x1f, 0xf7, 0xb7, 0x05, 0xf1, 0x58, 0x6d, 0xf4, 0x81, 0x95, 0x98,
- 0x96, 0xb2, 0xdf, 0x93, 0xa2, 0x3f, 0xc6, 0xe1, 0x09, 0xa2, 0xab, 0xd5, 0xf7, 0xcb, 0x02, 0x85,
- 0x37, 0x0d, 0xcf, 0x82, 0xf5, 0x43, 0x03, 0xae, 0x0b, 0xb4, 0xd5, 0x7d, 0xdb, 0xdd, 0xc3, 0x42,
- 0x98, 0x9f, 0x57, 0x5f, 0xe9, 0x45, 0xe7, 0xcf, 0xb8, 0xe8, 0xa7, 0xd0, 0x88, 0x16, 0x4d, 0x2b,
- 0x31, 0xde, 0x40, 0x5d, 0xc4, 0x38, 0xe0, 0x11, 0xa1, 0x6c, 0xd1, 0xdf, 0x64, 0xcc, 0xf7, 0x06,
- 0xd1, 0x25, 0x88, 0xfc, 0x96, 0xc4, 0x36, 0xe0, 0xaa, 0x20, 0xc6, 0x4b, 0x23, 0x71, 0x6a, 0xa9,
- 0x35, 0x9d, 0x48, 0x8d, 0xdb, 0x83, 0xd0, 0x38, 0x79, 0x2b, 0x69, 0x51, 0xe2, 0x26, 0xa4, 0x5c,
- 0x0c, 0x1d, 0x97, 0x79, 0xe6, 0x01, 0x44, 0x66, 0xe5, 0xbc, 0x9a, 0x9a, 0x27, 0x24, 0xb5, 0xf3,
- 0x7c, 0x0b, 0x90, 0xf9, 0xd4, 0x16, 0xc8, 0xe6, 0x8a, 0x61, 0x3e, 0x12, 0x94, 0xa8, 0x7d, 0x1b,
- 0xfb, 0x43, 0x27, 0x08, 0x94, 0x36, 0x94, 0x4e, 0x5d, 0x6f, 0xc1, 0xe4, 0x08, 0xf3, 0xe4, 0x5d,
- 0x59, 0x46, 0xc2, 0x27, 0x14, 0x64, 0x3a, 0x2f, 0xd9, 0x0c, 0xe1, 0x86, 0x60, 0xc3, 0x0c, 0xa2,
- 0xe5, 0x93, 0x14, 0x53, 0x94, 0xbe, 0x73, 0x19, 0xa5, 0xef, 0x7c, 0xbc, 0xf4, 0x1d, 0x3b, 0x50,
- 0xaa, 0x81, 0xea, 0x62, 0x0e, 0x94, 0x1d, 0x66, 0x80, 0x28, 0xbe, 0x5d, 0x0c, 0xd5, 0x3f, 0xe0,
- 0x81, 0xea, 0xa2, 0xd2, 0x20, 0xa6, 0x6b, 0x16, 0x4d, 0x4a, 0xf1, 0x89, 0x4c, 0xa8, 0x12, 0x23,
- 0x59, 0x6a, 0x4f, 0x60, 0xd2, 0x8a, 0x8d, 0xc9, 0x60, 0x7c, 0x00, 0x73, 0xf1, 0x60, 0x7c, 0x2e,
- 0xa1, 0xe6, 0xa0, 0x10, 0x7a, 0x07, 0x58, 0x64, 0x66, 0xf6, 0x91, 0x52, 0x6b, 0x14, 0xa8, 0x2f,
- 0x46, 0xad, 0xdf, 0x94, 0x54, 0xa9, 0x03, 0x9e, 0x77, 0x05, 0x64, 0x3b, 0x8a, 0xbb, 0x2f, 0xfb,
- 0x90, 0xbc, 0x3e, 0x81, 0x2b, 0xc9, 0xe0, 0x7b, 0x31, 0x8b, 0xe8, 0x32, 0xe7, 0xd4, 0x85, 0xe7,
- 0x8b, 0x61, 0xf0, 0x99, 0x8c, 0x93, 0x4a, 0xd0, 0xbd, 0x18, 0xda, 0xbf, 0x0e, 0x4d, 0x5d, 0x0c,
- 0xbe, 0x50, 0x5f, 0x8c, 0x42, 0xf2, 0xc5, 0x50, 0xfd, 0xbe, 0x21, 0xc9, 0xaa, 0xbb, 0xe6, 0xc3,
- 0x2f, 0x42, 0x56, 0xe4, 0xba, 0xbb, 0xd1, 0xf6, 0x59, 0x8a, 0xa2, 0x65, 0x5e, 0x1f, 0x2d, 0x25,
- 0x0a, 0x05, 0x14, 0xfe, 0x27, 0x43, 0xfd, 0x97, 0xb9, 0x7b, 0x39, 0x33, 0x99, 0x77, 0xce, 0xcb,
- 0x8c, 0xa4, 0xe7, 0x88, 0x19, 0xfd, 0x48, 0xb9, 0x8a, 0x9a, 0xa4, 0x2e, 0xc6, 0x74, 0xbf, 0x21,
- 0x13, 0x4c, 0x2a, 0x8f, 0x5d, 0x0c, 0x07, 0x1b, 0x16, 0xb2, 0x53, 0xd8, 0x85, 0xb0, 0xb8, 0xd3,
- 0x82, 0x72, 0x74, 0xf3, 0x55, 0xde, 0xe9, 0x56, 0xa0, 0xb4, 0xb9, 0xb5, 0xb3, 0xdd, 0x5a, 0x25,
- 0x17, 0xbb, 0x39, 0x28, 0xad, 0x6e, 0x59, 0xd6, 0xf3, 0xed, 0x0e, 0xb9, 0xd9, 0x25, 0x9f, 0xed,
- 0x2c, 0xff, 0x2c, 0x0f, 0xb9, 0xa7, 0x2f, 0xd0, 0xa7, 0x50, 0x60, 0xcf, 0xc6, 0x4e, 0x78, 0x3d,
- 0xd8, 0x3c, 0xe9, 0x65, 0x9c, 0xf9, 0xda, 0xf7, 0xfe, 0xf3, 0x67, 0x7f, 0x98, 0x9b, 0x35, 0xab,
- 0x4b, 0x87, 0x2b, 0x4b, 0x07, 0x87, 0x4b, 0x34, 0xc9, 0x3e, 0x34, 0xee, 0xa0, 0xaf, 0x41, 0x7e,
- 0x7b, 0x1c, 0xa2, 0xcc, 0x57, 0x85, 0xcd, 0xec, 0xc7, 0x72, 0xe6, 0x65, 0x4a, 0x74, 0xc6, 0x04,
- 0x4e, 0x74, 0x34, 0x0e, 0x09, 0xc9, 0x6f, 0x41, 0x45, 0x7d, 0xea, 0x76, 0xea, 0x53, 0xc3, 0xe6,
- 0xe9, 0xcf, 0xe8, 0xcc, 0xeb, 0x94, 0xd5, 0x6b, 0x26, 0xe2, 0xac, 0xd8, 0x63, 0x3c, 0x75, 0x15,
- 0x9d, 0x23, 0x17, 0x65, 0x3e, 0x44, 0x6c, 0x66, 0xbf, 0xac, 0x4b, 0xad, 0x22, 0x3c, 0x72, 0x09,
- 0xc9, 0x6f, 0xf2, 0x27, 0x74, 0xbd, 0x10, 0xdd, 0xd0, 0xbc, 0x81, 0x52, 0xdf, 0xf6, 0x34, 0x17,
- 0xb2, 0x01, 0x38, 0x93, 0x6b, 0x94, 0xc9, 0x15, 0x73, 0x96, 0x33, 0xe9, 0x45, 0x20, 0x0f, 0x8d,
- 0x3b, 0xcb, 0x3d, 0x28, 0xd0, 0xde, 0x31, 0xfa, 0x4c, 0xfc, 0x68, 0x6a, 0xba, 0xf2, 0x19, 0x86,
- 0x8e, 0x75, 0x9d, 0xcd, 0x39, 0xca, 0xa8, 0x66, 0x96, 0x09, 0x23, 0xda, 0x39, 0x7e, 0x68, 0xdc,
- 0xb9, 0x6d, 0xdc, 0x35, 0x96, 0xff, 0xaa, 0x00, 0x05, 0xda, 0xa3, 0x40, 0x07, 0x00, 0xb2, 0x47,
- 0x9a, 0x5c, 0x5d, 0xaa, 0xfd, 0x9a, 0x5c, 0x5d, 0xba, 0xbd, 0x6a, 0x36, 0x29, 0xd3, 0x39, 0x73,
- 0x86, 0x30, 0xa5, 0xad, 0x8f, 0x25, 0xda, 0xe9, 0x21, 0x7a, 0xfc, 0xa1, 0xc1, 0x9b, 0x35, 0xcc,
- 0xcd, 0x90, 0x8e, 0x5a, 0xac, 0x3f, 0x9a, 0xdc, 0x0e, 0x9a, 0x96, 0xa8, 0xf9, 0x80, 0x32, 0x5c,
- 0x32, 0xeb, 0x92, 0xa1, 0x4f, 0x21, 0x1e, 0x1a, 0x77, 0x3e, 0x6b, 0x98, 0x97, 0xb8, 0x96, 0x13,
- 0x33, 0xe8, 0x3b, 0x50, 0x8b, 0x77, 0xf2, 0xd0, 0x4d, 0x0d, 0xaf, 0x64, 0x67, 0xb0, 0xf9, 0xe6,
- 0xc9, 0x40, 0x5c, 0xa6, 0x79, 0x2a, 0x13, 0x67, 0xce, 0x38, 0x1f, 0x60, 0x3c, 0xb2, 0x09, 0x10,
- 0xb7, 0x01, 0xfa, 0x53, 0x83, 0x37, 0x63, 0x65, 0x23, 0x0e, 0xe9, 0xa8, 0xa7, 0xfa, 0x7d, 0xcd,
- 0x5b, 0xa7, 0x40, 0x71, 0x21, 0x3e, 0xa4, 0x42, 0x7c, 0x60, 0xce, 0x49, 0x21, 0x42, 0x67, 0x88,
- 0x43, 0x8f, 0x4b, 0xf1, 0xd9, 0x35, 0xf3, 0xb5, 0x98, 0x72, 0x62, 0xb3, 0xd2, 0x58, 0xac, 0x61,
- 0xa6, 0x35, 0x56, 0xac, 0x27, 0xa7, 0x35, 0x56, 0xbc, 0xdb, 0xa6, 0x33, 0x16, 0x6f, 0x8f, 0x69,
- 0x8c, 0x15, 0xcd, 0x2c, 0xff, 0xef, 0x24, 0x94, 0x56, 0xd9, 0x9f, 0xe2, 0x20, 0x0f, 0xca, 0x51,
- 0x0b, 0x09, 0xcd, 0xeb, 0xaa, 0xd4, 0xf2, 0x2a, 0xd7, 0xbc, 0x91, 0x39, 0xcf, 0x05, 0x7a, 0x83,
- 0x0a, 0xf4, 0xba, 0x79, 0x85, 0x70, 0xe6, 0x7f, 0xed, 0xb3, 0xc4, 0x6a, 0x99, 0x4b, 0x76, 0xbf,
- 0x4f, 0x14, 0xf1, 0x9b, 0x50, 0x55, 0x1b, 0x3a, 0xe8, 0x0d, 0x6d, 0x65, 0x5c, 0xed, 0x0e, 0x35,
- 0xcd, 0x93, 0x40, 0x38, 0xe7, 0x37, 0x29, 0xe7, 0x79, 0xf3, 0xaa, 0x86, 0xb3, 0x4f, 0x41, 0x63,
- 0xcc, 0x59, 0xe7, 0x45, 0xcf, 0x3c, 0xd6, 0xe2, 0xd1, 0x33, 0x8f, 0x37, 0x6e, 0x4e, 0x64, 0x3e,
- 0xa6, 0xa0, 0x84, 0x79, 0x00, 0x20, 0x5b, 0x23, 0x48, 0xab, 0x4b, 0xe5, 0xc2, 0x9a, 0x0c, 0x0e,
- 0xe9, 0xae, 0x8a, 0x69, 0x52, 0xb6, 0x7c, 0xdf, 0x25, 0xd8, 0x0e, 0x9c, 0x20, 0x64, 0x8e, 0x39,
- 0x1d, 0x6b, 0x6c, 0x20, 0xed, 0x7a, 0xe2, 0x7d, 0x92, 0xe6, 0xcd, 0x13, 0x61, 0x38, 0xf7, 0x5b,
- 0x94, 0xfb, 0x0d, 0xb3, 0xa9, 0xe1, 0x3e, 0x62, 0xb0, 0x64, 0xb3, 0xfd, 0x5f, 0x11, 0x2a, 0xcf,
- 0x6c, 0xc7, 0x0d, 0xb1, 0x6b, 0xbb, 0x3d, 0x8c, 0x76, 0xa1, 0x40, 0x73, 0x77, 0x32, 0x10, 0xab,
- 0x75, 0xfc, 0x64, 0x20, 0x8e, 0x15, 0xb2, 0xcd, 0x05, 0xca, 0xb8, 0x69, 0x5e, 0x26, 0x8c, 0x87,
- 0x92, 0xf4, 0x12, 0x2b, 0x81, 0x1b, 0x77, 0xd0, 0x4b, 0x28, 0xf2, 0x06, 0x76, 0x82, 0x50, 0xac,
- 0xa8, 0xd6, 0xbc, 0xa6, 0x9f, 0xd4, 0xed, 0x65, 0x95, 0x4d, 0x40, 0xe1, 0x08, 0x9f, 0x43, 0x00,
- 0xd9, 0x8f, 0x49, 0x5a, 0x34, 0xd5, 0xc7, 0x69, 0x2e, 0x64, 0x03, 0xe8, 0x74, 0xaa, 0xf2, 0xec,
- 0x47, 0xb0, 0x84, 0xef, 0x37, 0x60, 0xf2, 0x89, 0x1d, 0xec, 0xa3, 0x44, 0xee, 0x55, 0xde, 0x9b,
- 0x36, 0x9b, 0xba, 0x29, 0xce, 0xe5, 0x06, 0xe5, 0x72, 0x95, 0x85, 0x32, 0x95, 0x0b, 0x7d, 0x51,
- 0x69, 0xdc, 0x41, 0x7d, 0x28, 0xb2, 0xc7, 0xa6, 0x49, 0xfd, 0xc5, 0x5e, 0xae, 0x26, 0xf5, 0x17,
- 0x7f, 0x9f, 0x7a, 0x3a, 0x97, 0x11, 0x4c, 0x89, 0x47, 0x99, 0x28, 0xf1, 0x94, 0x25, 0xf1, 0x92,
- 0xb3, 0x39, 0x9f, 0x35, 0xcd, 0x79, 0xdd, 0xa4, 0xbc, 0xae, 0x9b, 0x8d, 0x94, 0xad, 0x38, 0xe4,
- 0x43, 0xe3, 0xce, 0x5d, 0x03, 0x7d, 0x07, 0x40, 0x36, 0xac, 0x52, 0x1e, 0x98, 0x6c, 0x82, 0xa5,
- 0x3c, 0x30, 0xd5, 0xeb, 0x32, 0x17, 0x29, 0xdf, 0xdb, 0xe6, 0xcd, 0x24, 0xdf, 0xd0, 0xb7, 0xdd,
- 0xe0, 0x25, 0xf6, 0xdf, 0x67, 0xd5, 0xf2, 0x60, 0xdf, 0x19, 0x91, 0x25, 0xfb, 0x50, 0x8e, 0xfa,
- 0x09, 0xc9, 0x68, 0x9b, 0xec, 0x7c, 0x24, 0xa3, 0x6d, 0xaa, 0x11, 0x11, 0x0f, 0x3b, 0xb1, 0xdd,
- 0x22, 0x40, 0x89, 0x03, 0xfe, 0x45, 0x1d, 0x26, 0xc9, 0x81, 0x9c, 0x1c, 0x4e, 0x64, 0xb1, 0x27,
- 0xb9, 0xfa, 0x54, 0xbd, 0x3a, 0xb9, 0xfa, 0x74, 0x9d, 0x28, 0x7e, 0x38, 0x21, 0x97, 0xb5, 0x25,
- 0x56, 0x45, 0x21, 0x2b, 0xf5, 0xa0, 0xa2, 0x14, 0x81, 0x90, 0x86, 0x58, 0xbc, 0xfe, 0x9d, 0x4c,
- 0x77, 0x9a, 0x0a, 0x92, 0xf9, 0x3a, 0xe5, 0x77, 0x99, 0xa5, 0x3b, 0xca, 0xaf, 0xcf, 0x20, 0x08,
- 0x43, 0xbe, 0x3a, 0xee, 0xf7, 0x9a, 0xd5, 0xc5, 0x7d, 0x7f, 0x21, 0x1b, 0x20, 0x73, 0x75, 0xd2,
- 0xf1, 0x5f, 0x41, 0x55, 0x2d, 0xfc, 0x20, 0x8d, 0xf0, 0x89, 0x0a, 0x7d, 0x32, 0x8f, 0xe8, 0xea,
- 0x46, 0xf1, 0xc8, 0x46, 0x59, 0xda, 0x0a, 0x18, 0x61, 0x3c, 0x80, 0x12, 0x2f, 0x00, 0xe9, 0x54,
- 0x1a, 0x2f, 0xe2, 0xeb, 0x54, 0x9a, 0xa8, 0x1e, 0xc5, 0x4f, 0xcf, 0x94, 0x23, 0xb9, 0x88, 0x8a,
- 0x5c, 0xcd, 0xb9, 0x3d, 0xc6, 0x61, 0x16, 0x37, 0x59, 0xb4, 0xcd, 0xe2, 0xa6, 0xd4, 0x07, 0xb2,
- 0xb8, 0xed, 0xe1, 0x90, 0xc7, 0x03, 0x71, 0xb9, 0x46, 0x19, 0xc4, 0xd4, 0xfc, 0x68, 0x9e, 0x04,
- 0xa2, 0xbb, 0xdc, 0x48, 0x86, 0x22, 0x39, 0x1e, 0x01, 0xc8, 0x62, 0x54, 0xf2, 0xc4, 0xaa, 0xed,
- 0x13, 0x24, 0x4f, 0xac, 0xfa, 0x7a, 0x56, 0x3c, 0xf6, 0x49, 0xbe, 0xec, 0x6e, 0x45, 0x38, 0xff,
- 0xd8, 0x00, 0x94, 0x2e, 0x57, 0xa1, 0x77, 0xf5, 0xd4, 0xb5, 0x3d, 0x87, 0xe6, 0x7b, 0x67, 0x03,
- 0xd6, 0xa5, 0x33, 0x29, 0x52, 0x8f, 0x42, 0x8f, 0x5e, 0x11, 0xa1, 0xbe, 0x6b, 0xc0, 0x74, 0xac,
- 0xc4, 0x85, 0xde, 0xca, 0xb0, 0x69, 0xa2, 0xf1, 0xd0, 0x7c, 0xfb, 0x54, 0x38, 0xdd, 0x51, 0x5e,
- 0xd9, 0x01, 0xe2, 0x4e, 0xf3, 0x3b, 0x06, 0xd4, 0xe2, 0x95, 0x30, 0x94, 0x41, 0x3b, 0xd5, 0xaf,
- 0x68, 0xde, 0x3e, 0x1d, 0xf0, 0x64, 0xf3, 0xc8, 0xeb, 0xcc, 0x00, 0x4a, 0xbc, 0x64, 0xa6, 0xdb,
- 0xf8, 0xf1, 0x06, 0x87, 0x6e, 0xe3, 0x27, 0xea, 0x6d, 0x9a, 0x8d, 0xef, 0x7b, 0x03, 0xac, 0xb8,
- 0x19, 0xaf, 0xa4, 0x65, 0x71, 0x3b, 0xd9, 0xcd, 0x12, 0x65, 0xb8, 0x2c, 0x6e, 0xd2, 0xcd, 0x44,
- 0xc1, 0x0c, 0x65, 0x10, 0x3b, 0xc5, 0xcd, 0x92, 0xf5, 0x36, 0x8d, 0x9b, 0x51, 0x86, 0x8a, 0x9b,
- 0xc9, 0x42, 0x96, 0xce, 0xcd, 0x52, 0xbd, 0x18, 0x9d, 0x9b, 0xa5, 0x6b, 0x61, 0x1a, 0x3b, 0x52,
- 0xbe, 0x31, 0x37, 0xbb, 0xa4, 0x29, 0x75, 0xa1, 0xf7, 0x32, 0x94, 0xa8, 0xed, 0xec, 0x34, 0xdf,
- 0x3f, 0x23, 0x74, 0xe6, 0x1e, 0x67, 0xea, 0x17, 0x7b, 0xfc, 0x8f, 0x0c, 0x98, 0xd3, 0x55, 0xc7,
- 0x50, 0x06, 0x9f, 0x8c, 0x46, 0x50, 0x73, 0xf1, 0xac, 0xe0, 0x27, 0x6b, 0x2b, 0xda, 0xf5, 0x8f,
- 0xea, 0xff, 0xfa, 0xf9, 0xbc, 0xf1, 0x1f, 0x9f, 0xcf, 0x1b, 0xff, 0xf5, 0xf9, 0xbc, 0xf1, 0x93,
- 0xff, 0x99, 0x9f, 0xd8, 0x2d, 0xd2, 0xff, 0xbf, 0xc3, 0xca, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff,
- 0x48, 0x49, 0x02, 0x7c, 0x86, 0x42, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// KVClient is the client API for KV service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type KVClient interface {
- // Range gets the keys in the range from the key-value store.
- Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
- // Put puts the given key into the key-value store.
- // A put request increments the revision of the key-value store
- // and generates one event in the event history.
- Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
- // DeleteRange deletes the given range from the key-value store.
- // A delete request increments the revision of the key-value store
- // and generates a delete event in the event history for every deleted key.
- DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
- // Txn processes multiple requests in a single transaction.
- // A txn request increments the revision of the key-value store
- // and generates events with the same revision for every completed request.
- // It is not allowed to modify the same key several times within one txn.
- Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
- // Compact compacts the event history in the etcd key-value store. The key-value
- // store should be periodically compacted or the event history will continue to grow
- // indefinitely.
- Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
-}
-
-type kVClient struct {
- cc *grpc.ClientConn
-}
-
-func NewKVClient(cc *grpc.ClientConn) KVClient {
- return &kVClient{cc}
-}
-
-func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
- out := new(RangeResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
- out := new(PutResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
- out := new(DeleteRangeResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
- out := new(TxnResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
- out := new(CompactionResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// KVServer is the server API for KV service.
-type KVServer interface {
- // Range gets the keys in the range from the key-value store.
- Range(context.Context, *RangeRequest) (*RangeResponse, error)
- // Put puts the given key into the key-value store.
- // A put request increments the revision of the key-value store
- // and generates one event in the event history.
- Put(context.Context, *PutRequest) (*PutResponse, error)
- // DeleteRange deletes the given range from the key-value store.
- // A delete request increments the revision of the key-value store
- // and generates a delete event in the event history for every deleted key.
- DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
- // Txn processes multiple requests in a single transaction.
- // A txn request increments the revision of the key-value store
- // and generates events with the same revision for every completed request.
- // It is not allowed to modify the same key several times within one txn.
- Txn(context.Context, *TxnRequest) (*TxnResponse, error)
- // Compact compacts the event history in the etcd key-value store. The key-value
- // store should be periodically compacted or the event history will continue to grow
- // indefinitely.
- Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
-}
-
-// UnimplementedKVServer can be embedded to have forward compatible implementations.
-type UnimplementedKVServer struct {
-}
-
-func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
-}
-func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
-}
-func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
-}
-func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
-}
-func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
-}
-
-func RegisterKVServer(s *grpc.Server, srv KVServer) {
- s.RegisterService(&_KV_serviceDesc, srv)
-}
-
-func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RangeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Range(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Range",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Range(ctx, req.(*RangeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(PutRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Put(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Put",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Put(ctx, req.(*PutRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteRangeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).DeleteRange(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/DeleteRange",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(TxnRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Txn(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Txn",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CompactionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Compact(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Compact",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _KV_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.KV",
- HandlerType: (*KVServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Range",
- Handler: _KV_Range_Handler,
- },
- {
- MethodName: "Put",
- Handler: _KV_Put_Handler,
- },
- {
- MethodName: "DeleteRange",
- Handler: _KV_DeleteRange_Handler,
- },
- {
- MethodName: "Txn",
- Handler: _KV_Txn_Handler,
- },
- {
- MethodName: "Compact",
- Handler: _KV_Compact_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "rpc.proto",
-}
-
-// WatchClient is the client API for Watch service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type WatchClient interface {
- // Watch watches for events happening or that have happened. Both input and output
- // are streams; the input stream is for creating and canceling watchers and the output
- // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
- // for several watches at once. The entire event history can be watched starting from the
- // last compaction revision.
- Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
-}
-
-type watchClient struct {
- cc *grpc.ClientConn
-}
-
-func NewWatchClient(cc *grpc.ClientConn) WatchClient {
- return &watchClient{cc}
-}
-
-func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
- if err != nil {
- return nil, err
- }
- x := &watchWatchClient{stream}
- return x, nil
-}
-
-type Watch_WatchClient interface {
- Send(*WatchRequest) error
- Recv() (*WatchResponse, error)
- grpc.ClientStream
-}
-
-type watchWatchClient struct {
- grpc.ClientStream
-}
-
-func (x *watchWatchClient) Send(m *WatchRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *watchWatchClient) Recv() (*WatchResponse, error) {
- m := new(WatchResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// WatchServer is the server API for Watch service.
-type WatchServer interface {
- // Watch watches for events happening or that have happened. Both input and output
- // are streams; the input stream is for creating and canceling watchers and the output
- // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
- // for several watches at once. The entire event history can be watched starting from the
- // last compaction revision.
- Watch(Watch_WatchServer) error
-}
-
-// UnimplementedWatchServer can be embedded to have forward compatible implementations.
-type UnimplementedWatchServer struct {
-}
-
-func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
- return status.Errorf(codes.Unimplemented, "method Watch not implemented")
-}
-
-func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
- s.RegisterService(&_Watch_serviceDesc, srv)
-}
-
-func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(WatchServer).Watch(&watchWatchServer{stream})
-}
-
-type Watch_WatchServer interface {
- Send(*WatchResponse) error
- Recv() (*WatchRequest, error)
- grpc.ServerStream
-}
-
-type watchWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *watchWatchServer) Send(m *WatchResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *watchWatchServer) Recv() (*WatchRequest, error) {
- m := new(WatchRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-var _Watch_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Watch",
- HandlerType: (*WatchServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Watch",
- Handler: _Watch_Watch_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "rpc.proto",
-}
-
-// LeaseClient is the client API for Lease service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type LeaseClient interface {
- // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
- // within a given time to live period. All keys attached to the lease will be expired and
- // deleted if the lease expires. Each expired key generates a delete event in the event history.
- LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
- // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
- LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
- // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
- // to the server and streaming keep alive responses from the server to the client.
- LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
- // LeaseTimeToLive retrieves lease information.
- LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
- // LeaseLeases lists all existing leases.
- LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
-}
-
-type leaseClient struct {
- cc *grpc.ClientConn
-}
-
-func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
- return &leaseClient{cc}
-}
-
-func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
- out := new(LeaseGrantResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
- out := new(LeaseRevokeResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
- if err != nil {
- return nil, err
- }
- x := &leaseLeaseKeepAliveClient{stream}
- return x, nil
-}
-
-type Lease_LeaseKeepAliveClient interface {
- Send(*LeaseKeepAliveRequest) error
- Recv() (*LeaseKeepAliveResponse, error)
- grpc.ClientStream
-}
-
-type leaseLeaseKeepAliveClient struct {
- grpc.ClientStream
-}
-
-func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
- m := new(LeaseKeepAliveResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
- out := new(LeaseTimeToLiveResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
- out := new(LeaseLeasesResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// LeaseServer is the server API for Lease service.
-type LeaseServer interface {
- // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
- // within a given time to live period. All keys attached to the lease will be expired and
- // deleted if the lease expires. Each expired key generates a delete event in the event history.
- LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
- // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
- LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
- // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
- // to the server and streaming keep alive responses from the server to the client.
- LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
- // LeaseTimeToLive retrieves lease information.
- LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
- // LeaseLeases lists all existing leases.
- LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
-}
-
-// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
-type UnimplementedLeaseServer struct {
-}
-
-func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
- return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
-}
-
-func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
- s.RegisterService(&_Lease_serviceDesc, srv)
-}
-
-func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseGrantRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseGrant(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseGrant",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseRevokeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseRevoke(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
-}
-
-type Lease_LeaseKeepAliveServer interface {
- Send(*LeaseKeepAliveResponse) error
- Recv() (*LeaseKeepAliveRequest, error)
- grpc.ServerStream
-}
-
-type leaseLeaseKeepAliveServer struct {
- grpc.ServerStream
-}
-
-func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
- m := new(LeaseKeepAliveRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseTimeToLiveRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseLeasesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseLeases(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseLeases",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Lease_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Lease",
- HandlerType: (*LeaseServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "LeaseGrant",
- Handler: _Lease_LeaseGrant_Handler,
- },
- {
- MethodName: "LeaseRevoke",
- Handler: _Lease_LeaseRevoke_Handler,
- },
- {
- MethodName: "LeaseTimeToLive",
- Handler: _Lease_LeaseTimeToLive_Handler,
- },
- {
- MethodName: "LeaseLeases",
- Handler: _Lease_LeaseLeases_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "LeaseKeepAlive",
- Handler: _Lease_LeaseKeepAlive_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "rpc.proto",
-}
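
The UnimplementedLeaseServer stub above exists so that a server implementation keeps compiling when new Lease RPCs are added: any method it does not override falls back to a codes.Unimplemented response. A minimal sketch of that pattern, assuming the etcd v3.5+ import path go.etcd.io/etcd/api/v3/etcdserverpb (the leaseBackend type and its fixed lease ID are illustrative only, not part of this file):

package main

import (
	"context"

	"google.golang.org/grpc"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

// leaseBackend overrides only LeaseGrant; every other Lease RPC is served by
// the embedded UnimplementedLeaseServer and returns codes.Unimplemented.
type leaseBackend struct {
	pb.UnimplementedLeaseServer
}

func (s *leaseBackend) LeaseGrant(ctx context.Context, req *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	// Illustrative only: echo the requested TTL back under a fixed lease ID.
	return &pb.LeaseGrantResponse{ID: 1, TTL: req.TTL}, nil
}

func main() {
	srv := grpc.NewServer()
	pb.RegisterLeaseServer(srv, &leaseBackend{})
	// srv.Serve(lis) would follow once a net.Listener is in place.
}

The bidirectional LeaseKeepAlive stream would be implemented the same way, looping over Recv and Send on the generated Lease_LeaseKeepAliveServer defined above.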
-
-// ClusterClient is the client API for Cluster service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ClusterClient interface {
- // MemberAdd adds a member into the cluster.
- MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
- // MemberUpdate updates the member configuration.
- MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
- // MemberList lists all the members in the cluster.
- MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
- // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
- MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error)
-}
-
-type clusterClient struct {
- cc *grpc.ClientConn
-}
-
-func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
- return &clusterClient{cc}
-}
-
-func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
- out := new(MemberAddResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
- out := new(MemberRemoveResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
- out := new(MemberUpdateResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
- out := new(MemberListResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) {
- out := new(MemberPromoteResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// ClusterServer is the server API for Cluster service.
-type ClusterServer interface {
- // MemberAdd adds a member into the cluster.
- MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
- // MemberUpdate updates the member configuration.
- MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
- // MemberList lists all the members in the cluster.
- MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
- // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
- MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error)
-}
-
-// UnimplementedClusterServer can be embedded to have forward compatible implementations.
-type UnimplementedClusterServer struct {
-}
-
-func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
-}
-func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
-}
-func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
-}
-func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
-}
-func (*UnimplementedClusterServer) MemberPromote(ctx context.Context, req *MemberPromoteRequest) (*MemberPromoteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberPromote not implemented")
-}
-
-func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
- s.RegisterService(&_Cluster_serviceDesc, srv)
-}
-
-func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberAddRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberAdd(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberAdd",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberRemoveRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberRemove(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberRemove",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberUpdateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberUpdate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberListRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberList(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberList",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberPromoteRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberPromote(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberPromote",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Cluster_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Cluster",
- HandlerType: (*ClusterServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "MemberAdd",
- Handler: _Cluster_MemberAdd_Handler,
- },
- {
- MethodName: "MemberRemove",
- Handler: _Cluster_MemberRemove_Handler,
- },
- {
- MethodName: "MemberUpdate",
- Handler: _Cluster_MemberUpdate_Handler,
- },
- {
- MethodName: "MemberList",
- Handler: _Cluster_MemberList_Handler,
- },
- {
- MethodName: "MemberPromote",
- Handler: _Cluster_MemberPromote_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "rpc.proto",
-}
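
NewClusterClient above only wraps an existing *grpc.ClientConn, so calling the Cluster RPCs is a matter of dialing a member and invoking the generated methods. A sketch of listing members this way, assuming the same go.etcd.io/etcd/api/v3/etcdserverpb import path and a placeholder endpoint on 127.0.0.1:2379:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

func main() {
	// Plaintext connection to a single member; production callers would add TLS.
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	resp, err := pb.NewClusterClient(conn).MemberList(ctx, &pb.MemberListRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Members {
		fmt.Println(m.ID, m.Name, m.ClientURLs)
	}
}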
-
-// MaintenanceClient is the client API for Maintenance service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MaintenanceClient interface {
- // Alarm activates, deactivates, and queries alarms regarding cluster health.
- Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
- // Status gets the status of the member.
- Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
- // Defragment defragments a member's backend database to recover storage space.
- Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
- // Hash computes the hash of the whole backend keyspace,
- // including key, lease, and other buckets in storage.
- // This is designed for testing ONLY!
- // Do not rely on this in production with ongoing transactions,
- // since Hash operation does not hold MVCC locks.
- // Use "HashKV" API instead for "key" bucket consistency checks.
- Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
- // HashKV computes the hash of all MVCC keys up to a given revision.
- // It only iterates "key" bucket in backend storage.
- HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
- // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
- Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
- // MoveLeader requests the current leader node to transfer its leadership to the given transferee.
- MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
- // Downgrade requests a downgrade, verifies its feasibility, or cancels an
- // ongoing downgrade of the cluster version.
- // Supported since etcd 3.5.
- Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error)
-}
-
-type maintenanceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
- return &maintenanceClient{cc}
-}
-
-func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
- out := new(AlarmResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
- out := new(StatusResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
- out := new(DefragmentResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
- out := new(HashResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
- out := new(HashKVResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
- if err != nil {
- return nil, err
- }
- x := &maintenanceSnapshotClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Maintenance_SnapshotClient interface {
- Recv() (*SnapshotResponse, error)
- grpc.ClientStream
-}
-
-type maintenanceSnapshotClient struct {
- grpc.ClientStream
-}
-
-func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
- m := new(SnapshotResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
- out := new(MoveLeaderResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) {
- out := new(DowngradeResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Downgrade", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// MaintenanceServer is the server API for Maintenance service.
-type MaintenanceServer interface {
- // Alarm activates, deactivates, and queries alarms regarding cluster health.
- Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
- // Status gets the status of the member.
- Status(context.Context, *StatusRequest) (*StatusResponse, error)
- // Defragment defragments a member's backend database to recover storage space.
- Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
- // Hash computes the hash of the whole backend keyspace,
- // including key, lease, and other buckets in storage.
- // This is designed for testing ONLY!
- // Do not rely on this in production with ongoing transactions,
- // since Hash operation does not hold MVCC locks.
- // Use "HashKV" API instead for "key" bucket consistency checks.
- Hash(context.Context, *HashRequest) (*HashResponse, error)
- // HashKV computes the hash of all MVCC keys up to a given revision.
- // It only iterates "key" bucket in backend storage.
- HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
- // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
- Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
- // MoveLeader requests the current leader node to transfer its leadership to the given transferee.
- MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
- // Downgrade requests a downgrade, verifies its feasibility, or cancels an
- // ongoing downgrade of the cluster version.
- // Supported since etcd 3.5.
- Downgrade(context.Context, *DowngradeRequest) (*DowngradeResponse, error)
-}
-
-// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
-type UnimplementedMaintenanceServer struct {
-}
-
-func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
-}
-func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
-}
-func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
-}
-func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
-}
-func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
-}
-func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
- return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
-}
-func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
-}
-func (*UnimplementedMaintenanceServer) Downgrade(ctx context.Context, req *DowngradeRequest) (*DowngradeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Downgrade not implemented")
-}
-
-func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
- s.RegisterService(&_Maintenance_serviceDesc, srv)
-}
-
-func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AlarmRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Alarm(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Alarm",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(StatusRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Status(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Status",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DefragmentRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Defragment(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Defragment",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HashRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Hash(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Hash",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HashKVRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).HashKV(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/HashKV",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(SnapshotRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
-}
-
-type Maintenance_SnapshotServer interface {
- Send(*SnapshotResponse) error
- grpc.ServerStream
-}
-
-type maintenanceSnapshotServer struct {
- grpc.ServerStream
-}
-
-func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MoveLeaderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).MoveLeader(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Downgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DowngradeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Downgrade(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Downgrade",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Downgrade(ctx, req.(*DowngradeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Maintenance_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Maintenance",
- HandlerType: (*MaintenanceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Alarm",
- Handler: _Maintenance_Alarm_Handler,
- },
- {
- MethodName: "Status",
- Handler: _Maintenance_Status_Handler,
- },
- {
- MethodName: "Defragment",
- Handler: _Maintenance_Defragment_Handler,
- },
- {
- MethodName: "Hash",
- Handler: _Maintenance_Hash_Handler,
- },
- {
- MethodName: "HashKV",
- Handler: _Maintenance_HashKV_Handler,
- },
- {
- MethodName: "MoveLeader",
- Handler: _Maintenance_MoveLeader_Handler,
- },
- {
- MethodName: "Downgrade",
- Handler: _Maintenance_Downgrade_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Snapshot",
- Handler: _Maintenance_Snapshot_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "rpc.proto",
-}
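
Snapshot is the only server-streaming Maintenance RPC: the generated Maintenance_SnapshotClient is drained with Recv until io.EOF, each SnapshotResponse carrying one chunk of the backend database in its Blob field. A sketch of writing those chunks to a file, assuming an already-dialed *grpc.ClientConn and the same placeholder import path as the earlier sketches:

package example

import (
	"context"
	"io"
	"os"

	"google.golang.org/grpc"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)

// saveSnapshot streams the member's backend snapshot into path, chunk by chunk.
func saveSnapshot(ctx context.Context, conn *grpc.ClientConn, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	stream, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, &pb.SnapshotRequest{})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server closed the stream; the snapshot is complete
		}
		if err != nil {
			return err
		}
		if _, err := f.Write(resp.Blob); err != nil {
			return err
		}
	}
}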
-
-// AuthClient is the client API for Auth service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type AuthClient interface {
- // AuthEnable enables authentication.
- AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
- // AuthDisable disables authentication.
- AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
- // AuthStatus displays authentication status.
- AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error)
- // Authenticate processes an authenticate request.
- Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
- // UserAdd adds a new user. User name cannot be empty.
- UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
- // UserGet gets detailed user information.
- UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
- // UserList gets a list of all users.
- UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
- // UserDelete deletes a specified user.
- UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
- // UserChangePassword changes the password of a specified user.
- UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
- // UserGrantRole grants a role to a specified user.
- UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
- // UserRevokeRole revokes a role from a specified user.
- UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
- // RoleAdd adds a new role. Role name cannot be empty.
- RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
- // RoleGet gets detailed role information.
- RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
- // RoleList gets a list of all roles.
- RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
- // RoleDelete deletes a specified role.
- RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
- // RoleGrantPermission grants a permission of a specified key or range to a specified role.
- RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
- // RoleRevokePermission revokes a key or range permission of a specified role.
- RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
-}
-
-type authClient struct {
- cc *grpc.ClientConn
-}
-
-func NewAuthClient(cc *grpc.ClientConn) AuthClient {
- return &authClient{cc}
-}
-
-func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
- out := new(AuthEnableResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
- out := new(AuthDisableResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) {
- out := new(AuthStatusResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthStatus", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
- out := new(AuthenticateResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
- out := new(AuthUserAddResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
- out := new(AuthUserGetResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
- out := new(AuthUserListResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
- out := new(AuthUserDeleteResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
- out := new(AuthUserChangePasswordResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
- out := new(AuthUserGrantRoleResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
- out := new(AuthUserRevokeRoleResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
- out := new(AuthRoleAddResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
- out := new(AuthRoleGetResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
- out := new(AuthRoleListResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
- out := new(AuthRoleDeleteResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
- out := new(AuthRoleGrantPermissionResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
- out := new(AuthRoleRevokePermissionResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// AuthServer is the server API for Auth service.
-type AuthServer interface {
- // AuthEnable enables authentication.
- AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
- // AuthDisable disables authentication.
- AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
- // AuthStatus displays authentication status.
- AuthStatus(context.Context, *AuthStatusRequest) (*AuthStatusResponse, error)
- // Authenticate processes an authenticate request.
- Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
- // UserAdd adds a new user. User name cannot be empty.
- UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
- // UserGet gets detailed user information.
- UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
- // UserList gets a list of all users.
- UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
- // UserDelete deletes a specified user.
- UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
- // UserChangePassword changes the password of a specified user.
- UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
- // UserGrantRole grants a role to a specified user.
- UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
- // UserRevokeRole revokes a role from a specified user.
- UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
- // RoleAdd adds a new role. Role name cannot be empty.
- RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
- // RoleGet gets detailed role information.
- RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
- // RoleList gets a list of all roles.
- RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
- // RoleDelete deletes a specified role.
- RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
- // RoleGrantPermission grants a permission of a specified key or range to a specified role.
- RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
- // RoleRevokePermission revokes a key or range permission of a specified role.
- RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
-}
-
-// UnimplementedAuthServer can be embedded to have forward compatible implementations.
-type UnimplementedAuthServer struct {
-}
-
-func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
-}
-func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
-}
-func (*UnimplementedAuthServer) AuthStatus(ctx context.Context, req *AuthStatusRequest) (*AuthStatusResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AuthStatus not implemented")
-}
-func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
-}
-func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
-}
-func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
-}
-func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
-}
-func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
-}
-func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
-}
-func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
-}
-func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
-}
-func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
-}
-func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
-}
-func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
-}
-func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
-}
-func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
-}
-func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
-}
-
-func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
- s.RegisterService(&_Auth_serviceDesc, srv)
-}
-
-func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthEnableRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).AuthEnable(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/AuthEnable",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthDisableRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).AuthDisable(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/AuthDisable",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_AuthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthStatusRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).AuthStatus(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/AuthStatus",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).AuthStatus(ctx, req.(*AuthStatusRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthenticateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).Authenticate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/Authenticate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserAddRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserAdd(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserAdd",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserGetRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserGet(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserGet",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserListRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserList(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserList",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserDeleteRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserDelete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserDelete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserChangePasswordRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserChangePassword(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserChangePassword",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserGrantRoleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserGrantRole(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserGrantRole",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserRevokeRoleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserRevokeRole(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleAddRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleAdd(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleAdd",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleGetRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleGet(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleGet",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleListRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleList(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleList",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleDeleteRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleDelete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleDelete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleGrantPermissionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleGrantPermission(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleRevokePermissionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleRevokePermission(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Auth_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Auth",
- HandlerType: (*AuthServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "AuthEnable",
- Handler: _Auth_AuthEnable_Handler,
- },
- {
- MethodName: "AuthDisable",
- Handler: _Auth_AuthDisable_Handler,
- },
- {
- MethodName: "AuthStatus",
- Handler: _Auth_AuthStatus_Handler,
- },
- {
- MethodName: "Authenticate",
- Handler: _Auth_Authenticate_Handler,
- },
- {
- MethodName: "UserAdd",
- Handler: _Auth_UserAdd_Handler,
- },
- {
- MethodName: "UserGet",
- Handler: _Auth_UserGet_Handler,
- },
- {
- MethodName: "UserList",
- Handler: _Auth_UserList_Handler,
- },
- {
- MethodName: "UserDelete",
- Handler: _Auth_UserDelete_Handler,
- },
- {
- MethodName: "UserChangePassword",
- Handler: _Auth_UserChangePassword_Handler,
- },
- {
- MethodName: "UserGrantRole",
- Handler: _Auth_UserGrantRole_Handler,
- },
- {
- MethodName: "UserRevokeRole",
- Handler: _Auth_UserRevokeRole_Handler,
- },
- {
- MethodName: "RoleAdd",
- Handler: _Auth_RoleAdd_Handler,
- },
- {
- MethodName: "RoleGet",
- Handler: _Auth_RoleGet_Handler,
- },
- {
- MethodName: "RoleList",
- Handler: _Auth_RoleList_Handler,
- },
- {
- MethodName: "RoleDelete",
- Handler: _Auth_RoleDelete_Handler,
- },
- {
- MethodName: "RoleGrantPermission",
- Handler: _Auth_RoleGrantPermission_Handler,
- },
- {
- MethodName: "RoleRevokePermission",
- Handler: _Auth_RoleRevokePermission_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "rpc.proto",
-}
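
Every unary handler above follows the same shape: decode the request, then either call the service directly or pass it to a server-wide interceptor together with a grpc.UnaryServerInfo whose FullMethod matches the strings seen here (for example "/etcdserverpb.Auth/Authenticate"). The interceptor itself is supplied when the *grpc.Server is constructed; a minimal logging interceptor as an illustration (the name logUnary is made up):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// logUnary runs around every unary RPC registered on the server; the generated
// handlers above hand it the decoded request plus the matching UnaryServerInfo.
func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	srv := grpc.NewServer(grpc.UnaryInterceptor(logUnary))
	// RegisterAuthServer(srv, ...) and srv.Serve(lis) would follow.
	_ = srv
}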
-
-func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.RaftTerm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
- i--
- dAtA[i] = 0x20
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x18
- }
- if m.MemberId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
- i--
- dAtA[i] = 0x10
- }
- if m.ClusterId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
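
MarshalToSizedBuffer fills the buffer from the end: for each set field it writes the varint value first and then the tag byte, which is why the index i counts down rather than up. The literal tags above follow the protobuf rule tag = (field_number << 3) | wire_type, so 0x8, 0x10, 0x18 and 0x20 are ResponseHeader fields 1 through 4 (cluster_id, member_id, revision, raft_term) with wire type 0, i.e. varint. A quick check of that arithmetic:

package main

import "fmt"

func main() {
	// tag = (field_number << 3) | wire_type; wire type 0 is varint.
	for field := 1; field <= 4; field++ {
		fmt.Printf("field %d -> tag byte %#x\n", field, field<<3|0)
	}
	// Prints 0x8, 0x10, 0x18, 0x20: the constants used above for cluster_id,
	// member_id, revision and raft_term.
}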
-
-func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.MaxCreateRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
- i--
- dAtA[i] = 0x68
- }
- if m.MinCreateRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
- i--
- dAtA[i] = 0x60
- }
- if m.MaxModRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
- i--
- dAtA[i] = 0x58
- }
- if m.MinModRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
- i--
- dAtA[i] = 0x50
- }
- if m.CountOnly {
- i--
- if m.CountOnly {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x48
- }
- if m.KeysOnly {
- i--
- if m.KeysOnly {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- if m.Serializable {
- i--
- if m.Serializable {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x38
- }
- if m.SortTarget != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
- i--
- dAtA[i] = 0x30
- }
- if m.SortOrder != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
- i--
- dAtA[i] = 0x28
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x20
- }
- if m.Limit != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
- i--
- dAtA[i] = 0x18
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Count != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Count))
- i--
- dAtA[i] = 0x20
- }
- if m.More {
- i--
- if m.More {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if len(m.Kvs) > 0 {
- for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *PutRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.IgnoreLease {
- i--
- if m.IgnoreLease {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- if m.IgnoreValue {
- i--
- if m.IgnoreValue {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- }
- if m.PrevKv {
- i--
- if m.PrevKv {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if m.Lease != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *PutResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.PrevKv != nil {
- {
- size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.PrevKv {
- i--
- if m.PrevKv {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.PrevKvs) > 0 {
- for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Deleted != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RequestOp) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Request != nil {
- {
- size := m.Request.Size()
- i -= size
- if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestRange != nil {
- {
- size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestPut != nil {
- {
- size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestDeleteRange != nil {
- {
- size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestTxn != nil {
- {
- size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Response != nil {
- {
- size := m.Response.Size()
- i -= size
- if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponseRange != nil {
- {
- size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponsePut != nil {
- {
- size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponseDeleteRange != nil {
- {
- size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponseTxn != nil {
- {
- size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- return len(dAtA) - i, nil
-}
-func (m *Compare) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x4
- i--
- dAtA[i] = 0x82
- }
- if m.TargetUnion != nil {
- {
- size := m.TargetUnion.Size()
- i -= size
- if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x1a
- }
- if m.Target != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Target))
- i--
- dAtA[i] = 0x10
- }
- if m.Result != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Result))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.Version))
- i--
- dAtA[i] = 0x20
- return len(dAtA) - i, nil
-}
-func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
- i--
- dAtA[i] = 0x28
- return len(dAtA) - i, nil
-}
-func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
- i--
- dAtA[i] = 0x30
- return len(dAtA) - i, nil
-}
-func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Value != nil {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x3a
- }
- return len(dAtA) - i, nil
-}
-func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x40
- return len(dAtA) - i, nil
-}
-func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Failure) > 0 {
- for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Success) > 0 {
- for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Compare) > 0 {
- for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Responses) > 0 {
- for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Succeeded {
- i--
- if m.Succeeded {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Physical {
- i--
- if m.Physical {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.HashRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.HashRevision))
- i--
- dAtA[i] = 0x20
- }
- if m.CompactRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
- i--
- dAtA[i] = 0x18
- }
- if m.Hash != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Hash != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x22
- }
- if len(m.Blob) > 0 {
- i -= len(m.Blob)
- copy(dAtA[i:], m.Blob)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
- i--
- dAtA[i] = 0x1a
- }
- if m.RemainingBytes != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.RequestUnion != nil {
- {
- size := m.RequestUnion.Size()
- i -= size
- if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.CreateRequest != nil {
- {
- size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.CancelRequest != nil {
- {
- size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ProgressRequest != nil {
- {
- size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- return len(dAtA) - i, nil
-}
-func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Fragment {
- i--
- if m.Fragment {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- if m.WatchId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
- i--
- dAtA[i] = 0x38
- }
- if m.PrevKv {
- i--
- if m.PrevKv {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- if len(m.Filters) > 0 {
- dAtA22 := make([]byte, len(m.Filters)*10)
- var j21 int
- for _, num := range m.Filters {
- for num >= 1<<7 {
- dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j21++
- }
- dAtA22[j21] = uint8(num)
- j21++
- }
- i -= j21
- copy(dAtA[i:], dAtA22[:j21])
- i = encodeVarintRpc(dAtA, i, uint64(j21))
- i--
- dAtA[i] = 0x2a
- }
- if m.ProgressNotify {
- i--
- if m.ProgressNotify {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if m.StartRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
- i--
- dAtA[i] = 0x18
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.WatchId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Events) > 0 {
- for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- }
- if m.Fragment {
- i--
- if m.Fragment {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x38
- }
- if len(m.CancelReason) > 0 {
- i -= len(m.CancelReason)
- copy(dAtA[i:], m.CancelReason)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
- i--
- dAtA[i] = 0x32
- }
- if m.CompactRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
- i--
- dAtA[i] = 0x28
- }
- if m.Canceled {
- i--
- if m.Canceled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if m.Created {
- i--
- if m.Created {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if m.WatchId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Error) > 0 {
- i -= len(m.Error)
- copy(dAtA[i:], m.Error)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
- i--
- dAtA[i] = 0x22
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x18
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseCheckpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Remaining_TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL))
- i--
- dAtA[i] = 0x10
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Checkpoints) > 0 {
- for iNdEx := len(m.Checkpoints) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Checkpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x18
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Keys {
- i--
- if m.Keys {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Keys) > 0 {
- for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Keys[iNdEx])
- copy(dAtA[i:], m.Keys[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
- i--
- dAtA[i] = 0x2a
- }
- }
- if m.GrantedTTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
- i--
- dAtA[i] = 0x20
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x18
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Leases) > 0 {
- for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Member) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Member) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.IsLearner {
- i--
- if m.IsLearner {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- }
- if len(m.ClientURLs) > 0 {
- for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ClientURLs[iNdEx])
- copy(dAtA[i:], m.ClientURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.PeerURLs) > 0 {
- for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PeerURLs[iNdEx])
- copy(dAtA[i:], m.PeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x12
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.IsLearner {
- i--
- if m.IsLearner {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if len(m.PeerURLs) > 0 {
- for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PeerURLs[iNdEx])
- copy(dAtA[i:], m.PeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Member != nil {
- {
- size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.PeerURLs) > 0 {
- for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PeerURLs[iNdEx])
- copy(dAtA[i:], m.PeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Linearizable {
- i--
- if m.Linearizable {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberPromoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberPromoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.TargetID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Alarm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
- i--
- dAtA[i] = 0x18
- }
- if m.MemberID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
- i--
- dAtA[i] = 0x10
- }
- if m.Action != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Action))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Alarm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
- i--
- dAtA[i] = 0x10
- }
- if m.MemberID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Alarms) > 0 {
- for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DowngradeRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DowngradeRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DowngradeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- }
- if m.Action != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Action))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DowngradeResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DowngradeResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DowngradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.StorageVersion) > 0 {
- i -= len(m.StorageVersion)
- copy(dAtA[i:], m.StorageVersion)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.StorageVersion)))
- i--
- dAtA[i] = 0x5a
- }
- if m.IsLearner {
- i--
- if m.IsLearner {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x50
- }
- if m.DbSizeInUse != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse))
- i--
- dAtA[i] = 0x48
- }
- if len(m.Errors) > 0 {
- for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Errors[iNdEx])
- copy(dAtA[i:], m.Errors[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Errors[iNdEx])))
- i--
- dAtA[i] = 0x42
- }
- }
- if m.RaftAppliedIndex != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex))
- i--
- dAtA[i] = 0x38
- }
- if m.RaftTerm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
- i--
- dAtA[i] = 0x30
- }
- if m.RaftIndex != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
- i--
- dAtA[i] = 0x28
- }
- if m.Leader != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
- i--
- dAtA[i] = 0x20
- }
- if m.DbSize != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthStatusRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthStatusRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.HashedPassword) > 0 {
- i -= len(m.HashedPassword)
- copy(dAtA[i:], m.HashedPassword)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword)))
- i--
- dAtA[i] = 0x22
- }
- if m.Options != nil {
- {
- size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.HashedPassword) > 0 {
- i -= len(m.HashedPassword)
- copy(dAtA[i:], m.HashedPassword)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.User) > 0 {
- i -= len(m.User)
- copy(dAtA[i:], m.User)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Perm != nil {
- {
- size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthStatusResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthStatusResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.AuthRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.AuthRevision))
- i--
- dAtA[i] = 0x18
- }
- if m.Enabled {
- i--
- if m.Enabled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Token) > 0 {
- i -= len(m.Token)
- copy(dAtA[i:], m.Token)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Roles) > 0 {
- for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Roles[iNdEx])
- copy(dAtA[i:], m.Roles[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Perm) > 0 {
- for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Roles) > 0 {
- for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Roles[iNdEx])
- copy(dAtA[i:], m.Roles[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Users) > 0 {
- for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Users[iNdEx])
- copy(dAtA[i:], m.Users[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
- offset -= sovRpc(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ResponseHeader) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ClusterId != 0 {
- n += 1 + sovRpc(uint64(m.ClusterId))
- }
- if m.MemberId != 0 {
- n += 1 + sovRpc(uint64(m.MemberId))
- }
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.RaftTerm != 0 {
- n += 1 + sovRpc(uint64(m.RaftTerm))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RangeRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Limit != 0 {
- n += 1 + sovRpc(uint64(m.Limit))
- }
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.SortOrder != 0 {
- n += 1 + sovRpc(uint64(m.SortOrder))
- }
- if m.SortTarget != 0 {
- n += 1 + sovRpc(uint64(m.SortTarget))
- }
- if m.Serializable {
- n += 2
- }
- if m.KeysOnly {
- n += 2
- }
- if m.CountOnly {
- n += 2
- }
- if m.MinModRevision != 0 {
- n += 1 + sovRpc(uint64(m.MinModRevision))
- }
- if m.MaxModRevision != 0 {
- n += 1 + sovRpc(uint64(m.MaxModRevision))
- }
- if m.MinCreateRevision != 0 {
- n += 1 + sovRpc(uint64(m.MinCreateRevision))
- }
- if m.MaxCreateRevision != 0 {
- n += 1 + sovRpc(uint64(m.MaxCreateRevision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RangeResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Kvs) > 0 {
- for _, e := range m.Kvs {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.More {
- n += 2
- }
- if m.Count != 0 {
- n += 1 + sovRpc(uint64(m.Count))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *PutRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Lease != 0 {
- n += 1 + sovRpc(uint64(m.Lease))
- }
- if m.PrevKv {
- n += 2
- }
- if m.IgnoreValue {
- n += 2
- }
- if m.IgnoreLease {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *PutResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.PrevKv != nil {
- l = m.PrevKv.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DeleteRangeRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.PrevKv {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DeleteRangeResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Deleted != 0 {
- n += 1 + sovRpc(uint64(m.Deleted))
- }
- if len(m.PrevKvs) > 0 {
- for _, e := range m.PrevKvs {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RequestOp) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Request != nil {
- n += m.Request.Size()
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RequestOp_RequestRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestRange != nil {
- l = m.RequestRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *RequestOp_RequestPut) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestPut != nil {
- l = m.RequestPut.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *RequestOp_RequestDeleteRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestDeleteRange != nil {
- l = m.RequestDeleteRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *RequestOp_RequestTxn) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestTxn != nil {
- l = m.RequestTxn.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Response != nil {
- n += m.Response.Size()
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ResponseOp_ResponseRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponseRange != nil {
- l = m.ResponseRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp_ResponsePut) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponsePut != nil {
- l = m.ResponsePut.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponseDeleteRange != nil {
- l = m.ResponseDeleteRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp_ResponseTxn) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponseTxn != nil {
- l = m.ResponseTxn.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *Compare) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Result != 0 {
- n += 1 + sovRpc(uint64(m.Result))
- }
- if m.Target != 0 {
- n += 1 + sovRpc(uint64(m.Target))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.TargetUnion != nil {
- n += m.TargetUnion.Size()
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Compare_Version) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.Version))
- return n
-}
-func (m *Compare_CreateRevision) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.CreateRevision))
- return n
-}
-func (m *Compare_ModRevision) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.ModRevision))
- return n
-}
-func (m *Compare_Value) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Value != nil {
- l = len(m.Value)
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *Compare_Lease) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.Lease))
- return n
-}
-func (m *TxnRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Compare) > 0 {
- for _, e := range m.Compare {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if len(m.Success) > 0 {
- for _, e := range m.Success {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if len(m.Failure) > 0 {
- for _, e := range m.Failure {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *TxnResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Succeeded {
- n += 2
- }
- if len(m.Responses) > 0 {
- for _, e := range m.Responses {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CompactionRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.Physical {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CompactionResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashKVRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashKVResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Hash != 0 {
- n += 1 + sovRpc(uint64(m.Hash))
- }
- if m.CompactRevision != 0 {
- n += 1 + sovRpc(uint64(m.CompactRevision))
- }
- if m.HashRevision != 0 {
- n += 1 + sovRpc(uint64(m.HashRevision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Hash != 0 {
- n += 1 + sovRpc(uint64(m.Hash))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *SnapshotRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *SnapshotResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.RemainingBytes != 0 {
- n += 1 + sovRpc(uint64(m.RemainingBytes))
- }
- l = len(m.Blob)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestUnion != nil {
- n += m.RequestUnion.Size()
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchRequest_CreateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.CreateRequest != nil {
- l = m.CreateRequest.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *WatchRequest_CancelRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.CancelRequest != nil {
- l = m.CancelRequest.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *WatchRequest_ProgressRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ProgressRequest != nil {
- l = m.ProgressRequest.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *WatchCreateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.StartRevision != 0 {
- n += 1 + sovRpc(uint64(m.StartRevision))
- }
- if m.ProgressNotify {
- n += 2
- }
- if len(m.Filters) > 0 {
- l = 0
- for _, e := range m.Filters {
- l += sovRpc(uint64(e))
- }
- n += 1 + sovRpc(uint64(l)) + l
- }
- if m.PrevKv {
- n += 2
- }
- if m.WatchId != 0 {
- n += 1 + sovRpc(uint64(m.WatchId))
- }
- if m.Fragment {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchCancelRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.WatchId != 0 {
- n += 1 + sovRpc(uint64(m.WatchId))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchProgressRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.WatchId != 0 {
- n += 1 + sovRpc(uint64(m.WatchId))
- }
- if m.Created {
- n += 2
- }
- if m.Canceled {
- n += 2
- }
- if m.CompactRevision != 0 {
- n += 1 + sovRpc(uint64(m.CompactRevision))
- }
- l = len(m.CancelReason)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Fragment {
- n += 2
- }
- if len(m.Events) > 0 {
- for _, e := range m.Events {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseGrantRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseGrantResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- l = len(m.Error)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseRevokeRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseRevokeResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseCheckpoint) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.Remaining_TTL != 0 {
- n += 1 + sovRpc(uint64(m.Remaining_TTL))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseCheckpointRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Checkpoints) > 0 {
- for _, e := range m.Checkpoints {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseCheckpointResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseKeepAliveRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseKeepAliveResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseTimeToLiveRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.Keys {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseTimeToLiveResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- if m.GrantedTTL != 0 {
- n += 1 + sovRpc(uint64(m.GrantedTTL))
- }
- if len(m.Keys) > 0 {
- for _, b := range m.Keys {
- l = len(b)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseLeasesRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseLeasesResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Leases) > 0 {
- for _, e := range m.Leases {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Member) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.PeerURLs) > 0 {
- for _, s := range m.PeerURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if len(m.ClientURLs) > 0 {
- for _, s := range m.ClientURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.IsLearner {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberAddRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.PeerURLs) > 0 {
- for _, s := range m.PeerURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.IsLearner {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberAddResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Member != nil {
- l = m.Member.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberRemoveRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberRemoveResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberUpdateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if len(m.PeerURLs) > 0 {
- for _, s := range m.PeerURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberUpdateResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberListRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Linearizable {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberListResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberPromoteRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberPromoteResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DefragmentRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DefragmentResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MoveLeaderRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TargetID != 0 {
- n += 1 + sovRpc(uint64(m.TargetID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MoveLeaderResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AlarmRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Action != 0 {
- n += 1 + sovRpc(uint64(m.Action))
- }
- if m.MemberID != 0 {
- n += 1 + sovRpc(uint64(m.MemberID))
- }
- if m.Alarm != 0 {
- n += 1 + sovRpc(uint64(m.Alarm))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AlarmMember) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.MemberID != 0 {
- n += 1 + sovRpc(uint64(m.MemberID))
- }
- if m.Alarm != 0 {
- n += 1 + sovRpc(uint64(m.Alarm))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AlarmResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Alarms) > 0 {
- for _, e := range m.Alarms {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DowngradeRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Action != 0 {
- n += 1 + sovRpc(uint64(m.Action))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DowngradeResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *StatusRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *StatusResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.DbSize != 0 {
- n += 1 + sovRpc(uint64(m.DbSize))
- }
- if m.Leader != 0 {
- n += 1 + sovRpc(uint64(m.Leader))
- }
- if m.RaftIndex != 0 {
- n += 1 + sovRpc(uint64(m.RaftIndex))
- }
- if m.RaftTerm != 0 {
- n += 1 + sovRpc(uint64(m.RaftTerm))
- }
- if m.RaftAppliedIndex != 0 {
- n += 1 + sovRpc(uint64(m.RaftAppliedIndex))
- }
- if len(m.Errors) > 0 {
- for _, s := range m.Errors {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.DbSizeInUse != 0 {
- n += 1 + sovRpc(uint64(m.DbSizeInUse))
- }
- if m.IsLearner {
- n += 2
- }
- l = len(m.StorageVersion)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthEnableRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthDisableRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthStatusRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthenticateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserAddRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Options != nil {
- l = m.Options.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.HashedPassword)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGetRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserDeleteRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserChangePasswordRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.HashedPassword)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGrantRoleRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.User)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserRevokeRoleRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleAddRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGetRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserListRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleListRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleDeleteRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Perm != nil {
- l = m.Perm.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthEnableResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthDisableResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthStatusResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Enabled {
- n += 2
- }
- if m.AuthRevision != 0 {
- n += 1 + sovRpc(uint64(m.AuthRevision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthenticateResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Token)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserAddResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGetResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Roles) > 0 {
- for _, s := range m.Roles {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserDeleteResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserChangePasswordResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGrantRoleResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserRevokeRoleResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleAddResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGetResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Perm) > 0 {
- for _, e := range m.Perm {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleListResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Roles) > 0 {
- for _, s := range m.Roles {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserListResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Users) > 0 {
- for _, s := range m.Users {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleDeleteResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovRpc(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRpc(x uint64) (n int) {
- return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
- }
- m.ClusterId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ClusterId |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
- }
- m.MemberId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemberId |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
- }
- m.RaftTerm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RaftTerm |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RangeRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- m.Limit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Limit |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
- }
- m.SortOrder = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
- }
- m.SortTarget = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Serializable = bool(v != 0)
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.KeysOnly = bool(v != 0)
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.CountOnly = bool(v != 0)
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
- }
- m.MinModRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MinModRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
- }
- m.MaxModRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MaxModRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
- }
- m.MinCreateRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MinCreateRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 13:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
- }
- m.MaxCreateRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MaxCreateRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RangeResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
- if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.More = bool(v != 0)
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Count |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PutRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- m.Lease = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Lease |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PrevKv = bool(v != 0)
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IgnoreValue = bool(v != 0)
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IgnoreLease = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PutResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.PrevKv == nil {
- m.PrevKv = &mvccpb.KeyValue{}
- }
- if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PrevKv = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
- }
- m.Deleted = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Deleted |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
- if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RequestOp) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &RangeRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestRange{v}
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &PutRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestPut{v}
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &DeleteRangeRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestDeleteRange{v}
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &TxnRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestTxn{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResponseOp) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &RangeResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponseRange{v}
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &PutResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponsePut{v}
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &DeleteRangeResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponseDeleteRange{v}
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &TxnResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponseTxn{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Compare) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Compare: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
- }
- m.Result = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Result |= Compare_CompareResult(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
- }
- m.Target = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Target |= Compare_CompareTarget(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_Version{v}
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_CreateRevision{v}
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_ModRevision{v}
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := make([]byte, postIndex-iNdEx)
- copy(v, dAtA[iNdEx:postIndex])
- m.TargetUnion = &Compare_Value{v}
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_Lease{v}
- case 64:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *TxnRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Compare = append(m.Compare, &Compare{})
- if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Success = append(m.Success, &RequestOp{})
- if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Failure = append(m.Failure, &RequestOp{})
- if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *TxnResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Succeeded = bool(v != 0)
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Responses = append(m.Responses, &ResponseOp{})
- if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Physical = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
- }
- m.Hash = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Hash |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
- }
- m.CompactRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CompactRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field HashRevision", wireType)
- }
- m.HashRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.HashRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
- }
- m.Hash = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Hash |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
- }
- m.RemainingBytes = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RemainingBytes |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
- if m.Blob == nil {
- m.Blob = []byte{}
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &WatchCreateRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.RequestUnion = &WatchRequest_CreateRequest{v}
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &WatchCancelRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.RequestUnion = &WatchRequest_CancelRequest{v}
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &WatchProgressRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.RequestUnion = &WatchRequest_ProgressRequest{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
- }
- m.StartRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StartRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.ProgressNotify = bool(v != 0)
- case 5:
- if wireType == 0 {
- var v WatchCreateRequest_FilterType
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= WatchCreateRequest_FilterType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Filters = append(m.Filters, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- if elementCount != 0 && len(m.Filters) == 0 {
- m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v WatchCreateRequest_FilterType
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= WatchCreateRequest_FilterType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Filters = append(m.Filters, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PrevKv = bool(v != 0)
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
- }
- m.WatchId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.WatchId |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Fragment = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
- }
- m.WatchId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.WatchId |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
- }
- m.WatchId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.WatchId |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Created = bool(v != 0)
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Canceled = bool(v != 0)
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
- }
- m.CompactRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CompactRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.CancelReason = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Fragment = bool(v != 0)
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Events = append(m.Events, &mvccpb.Event{})
- if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Error = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType)
- }
- m.Remaining_TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Remaining_TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{})
- if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Keys = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
- }
- m.GrantedTTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.GrantedTTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
- copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Leases = append(m.Leases, &LeaseStatus{})
- if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Member) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Member: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsLearner = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsLearner = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Member == nil {
- m.Member = &Member{}
- }
- if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Linearizable", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Linearizable = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
- }
- m.TargetID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TargetID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
- }
- m.Action = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
- }
- m.MemberID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemberID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
- }
- m.Alarm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Alarm |= AlarmType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AlarmMember) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
- }
- m.MemberID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemberID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
- }
- m.Alarm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Alarm |= AlarmType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Alarms = append(m.Alarms, &AlarmMember{})
- if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DowngradeRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DowngradeRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DowngradeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
- }
- m.Action = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Action |= DowngradeRequest_DowngradeAction(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DowngradeResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DowngradeResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DowngradeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StatusRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StatusResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
- }
- m.DbSize = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DbSize |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
- }
- m.Leader = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Leader |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
- }
- m.RaftIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RaftIndex |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
- }
- m.RaftTerm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RaftTerm |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType)
- }
- m.RaftAppliedIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RaftAppliedIndex |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType)
- }
- m.DbSizeInUse = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DbSizeInUse |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsLearner = bool(v != 0)
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field StorageVersion", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.StorageVersion = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthStatusRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthStatusRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Options == nil {
- m.Options = &authpb.UserAddOptions{}
- }
- if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.HashedPassword = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.HashedPassword = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.User = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Perm == nil {
- m.Perm = &authpb.Permission{}
- }
- if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthStatusResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthStatusResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Enabled = bool(v != 0)
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
- }
- m.AuthRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AuthRevision |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Token = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Perm = append(m.Perm, &authpb.Permission{})
- if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRpc(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRpc
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupRpc
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRpc
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group")
-)
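
The Unmarshal methods and the skipRpc helper above all decode the protobuf wire format by hand: every tag and length is a base-128 varint in which each byte contributes its low 7 bits and a cleared high bit terminates the value. A minimal standalone sketch of that decoding step, using only the standard library (the helper name decodeVarint is illustrative and not part of the generated code):

    package main

    import (
        "errors"
        "fmt"
    )

    // decodeVarint reads one protobuf base-128 varint from b and returns the
    // value plus the number of bytes consumed. A byte with the high bit cleared
    // ends the varint; more than 10 bytes cannot fit in a uint64.
    func decodeVarint(b []byte) (uint64, int, error) {
        var v uint64
        for i := 0; i < len(b); i++ {
            if i >= 10 {
                return 0, 0, errors.New("varint overflows 64 bits")
            }
            v |= uint64(b[i]&0x7F) << (7 * uint(i))
            if b[i] < 0x80 {
                return v, i + 1, nil
            }
        }
        return 0, 0, errors.New("unexpected end of data")
    }

    func main() {
        v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded on the wire
        fmt.Println(v, n, err)                        // 300 2 <nil>
    }
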
diff --git a/api/etcdserverpb/rpc.proto b/api/etcdserverpb/rpc.proto
deleted file mode 100644
index 9cdc0b37f6e..00000000000
--- a/api/etcdserverpb/rpc.proto
+++ /dev/null
@@ -1,1390 +0,0 @@
-syntax = "proto3";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/api/mvccpb/kv.proto";
-import "etcd/api/authpb/auth.proto";
-import "etcd/api/versionpb/version.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-service KV {
- // Range gets the keys in the range from the key-value store.
- rpc Range(RangeRequest) returns (RangeResponse) {
- option (google.api.http) = {
- post: "/v3/kv/range"
- body: "*"
- };
- }
-
- // Put puts the given key into the key-value store.
- // A put request increments the revision of the key-value store
- // and generates one event in the event history.
- rpc Put(PutRequest) returns (PutResponse) {
- option (google.api.http) = {
- post: "/v3/kv/put"
- body: "*"
- };
- }
-
- // DeleteRange deletes the given range from the key-value store.
- // A delete request increments the revision of the key-value store
- // and generates a delete event in the event history for every deleted key.
- rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
- option (google.api.http) = {
- post: "/v3/kv/deleterange"
- body: "*"
- };
- }
-
- // Txn processes multiple requests in a single transaction.
- // A txn request increments the revision of the key-value store
- // and generates events with the same revision for every completed request.
- // It is not allowed to modify the same key several times within one txn.
- rpc Txn(TxnRequest) returns (TxnResponse) {
- option (google.api.http) = {
- post: "/v3/kv/txn"
- body: "*"
- };
- }
-
- // Compact compacts the event history in the etcd key-value store. The key-value
- // store should be periodically compacted or the event history will continue to grow
- // indefinitely.
- rpc Compact(CompactionRequest) returns (CompactionResponse) {
- option (google.api.http) = {
- post: "/v3/kv/compaction"
- body: "*"
- };
- }
-}
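
These KV RPCs are normally reached through a typed client rather than raw gRPC. As a rough sketch, assuming the Go client package go.etcd.io/etcd/client/v3 and a locally reachable member (endpoint, key, and value are illustrative), a Put followed by a single-key Range looks like:

    package main

    import (
        "context"
        "fmt"
        "time"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func main() {
        // Connect to a single local member; endpoint and timeouts are illustrative.
        cli, err := clientv3.New(clientv3.Config{
            Endpoints:   []string{"127.0.0.1:2379"},
            DialTimeout: 5 * time.Second,
        })
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
        defer cancel()

        // Put issues a PutRequest; Get issues a RangeRequest for a single key.
        if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
            panic(err)
        }
        resp, err := cli.Get(ctx, "foo")
        if err != nil {
            panic(err)
        }
        for _, kv := range resp.Kvs {
            fmt.Printf("%s=%s (mod_revision=%d)\n", kv.Key, kv.Value, kv.ModRevision)
        }
    }
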
-
-service Watch {
- // Watch watches for events happening or that have happened. Both input and output
- // are streams; the input stream is for creating and canceling watchers and the output
- // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
- // for several watches at once. The entire event history can be watched starting from the
- // last compaction revision.
- rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
- option (google.api.http) = {
- post: "/v3/watch"
- body: "*"
- };
- }
-}
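
A sketch of consuming that bidirectional stream with the same assumed Go client (clientv3): one Watch call opens a watcher over a prefix, and the returned channel delivers WatchResponse batches until the context is canceled. The helper name watchPrefix is illustrative:

    package example

    import (
        "context"
        "fmt"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // watchPrefix prints events for every key under prefix until ctx is canceled.
    func watchPrefix(ctx context.Context, cli *clientv3.Client, prefix string) {
        for wresp := range cli.Watch(ctx, prefix, clientv3.WithPrefix()) {
            if wresp.Canceled {
                fmt.Println("watch canceled:", wresp.Err())
                return
            }
            for _, ev := range wresp.Events {
                // ev.Type is a PUT or DELETE event; ev.Kv is the affected key-value.
                fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
            }
        }
    }
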
-
-service Lease {
- // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
- // within a given time to live period. All keys attached to the lease will be expired and
- // deleted if the lease expires. Each expired key generates a delete event in the event history.
- rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
- option (google.api.http) = {
- post: "/v3/lease/grant"
- body: "*"
- };
- }
-
- // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
- rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
- option (google.api.http) = {
- post: "/v3/lease/revoke"
- body: "*"
- additional_bindings {
- post: "/v3/kv/lease/revoke"
- body: "*"
- }
- };
- }
-
- // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
- // to the server and streaming keep alive responses from the server to the client.
- rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
- option (google.api.http) = {
- post: "/v3/lease/keepalive"
- body: "*"
- };
- }
-
- // LeaseTimeToLive retrieves lease information.
- rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
- option (google.api.http) = {
- post: "/v3/lease/timetolive"
- body: "*"
- additional_bindings {
- post: "/v3/kv/lease/timetolive"
- body: "*"
- }
- };
- }
-
- // LeaseLeases lists all existing leases.
- rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
- option (google.api.http) = {
- post: "/v3/lease/leases"
- body: "*"
- additional_bindings {
- post: "/v3/kv/lease/leases"
- body: "*"
- }
- };
- }
-}
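
A sketch of tying a key's lifetime to a lease with the assumed Go client: grant a TTL, attach the key, then keep the lease alive for as long as the context lives. The helper name putWithLease and the key are illustrative:

    package example

    import (
        "context"
        "fmt"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // putWithLease attaches a key to a 10-second lease and keeps the lease alive
    // until ctx is canceled, after which the key expires along with the lease.
    func putWithLease(ctx context.Context, cli *clientv3.Client) error {
        lease, err := cli.Grant(ctx, 10) // LeaseGrant with a 10s TTL
        if err != nil {
            return err
        }
        if _, err := cli.Put(ctx, "service/instance-1", "alive", clientv3.WithLease(lease.ID)); err != nil {
            return err
        }
        // LeaseKeepAlive streams keep-alive requests; the channel must be drained.
        ch, err := cli.KeepAlive(ctx, lease.ID)
        if err != nil {
            return err
        }
        for ka := range ch {
            fmt.Println("lease refreshed, TTL:", ka.TTL)
        }
        return nil
    }
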
-
-service Cluster {
- // MemberAdd adds a member into the cluster.
- rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
- option (google.api.http) = {
- post: "/v3/cluster/member/add"
- body: "*"
- };
- }
-
- // MemberRemove removes an existing member from the cluster.
- rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
- option (google.api.http) = {
- post: "/v3/cluster/member/remove"
- body: "*"
- };
- }
-
- // MemberUpdate updates the member configuration.
- rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
- option (google.api.http) = {
- post: "/v3/cluster/member/update"
- body: "*"
- };
- }
-
- // MemberList lists all the members in the cluster.
- rpc MemberList(MemberListRequest) returns (MemberListResponse) {
- option (google.api.http) = {
- post: "/v3/cluster/member/list"
- body: "*"
- };
- }
-
- // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
- rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) {
- option (google.api.http) = {
- post: "/v3/cluster/member/promote"
- body: "*"
- };
- }
-}
-
-service Maintenance {
- // Alarm activates, deactivates, and queries alarms regarding cluster health.
- rpc Alarm(AlarmRequest) returns (AlarmResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/alarm"
- body: "*"
- };
- }
-
- // Status gets the status of the member.
- rpc Status(StatusRequest) returns (StatusResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/status"
- body: "*"
- };
- }
-
- // Defragment defragments a member's backend database to recover storage space.
- rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/defragment"
- body: "*"
- };
- }
-
- // Hash computes the hash of whole backend keyspace,
- // including key, lease, and other buckets in storage.
- // This is designed for testing ONLY!
- // Do not rely on this in production with ongoing transactions,
- // since Hash operation does not hold MVCC locks.
- // Use "HashKV" API instead for "key" bucket consistency checks.
- rpc Hash(HashRequest) returns (HashResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/hash"
- body: "*"
- };
- }
-
- // HashKV computes the hash of all MVCC keys up to a given revision.
- // It only iterates "key" bucket in backend storage.
- rpc HashKV(HashKVRequest) returns (HashKVResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/hash"
- body: "*"
- };
- }
-
- // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
- rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/snapshot"
- body: "*"
- };
- }
-
- // MoveLeader requests the current leader node to transfer its leadership to the transferee.
- rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/transfer-leadership"
- body: "*"
- };
- }
-
- // Downgrade requests a downgrade, verifies its feasibility, or cancels an in-progress downgrade
- // of the cluster version.
- // Supported since etcd 3.5.
- rpc Downgrade(DowngradeRequest) returns (DowngradeResponse) {
- option (google.api.http) = {
- post: "/v3/maintenance/downgrade"
- body: "*"
- };
- }
-}
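
Maintenance RPCs are addressed to individual members, and the assumed Go client exposes them per endpoint. A sketch that reads one member's status and then defragments its backend (reportAndDefrag is an illustrative helper name; Defragment is a heavy, blocking operation on the targeted member and is normally run one member at a time):

    package example

    import (
        "context"
        "fmt"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // reportAndDefrag queries a single member's status, then defragments its backend.
    func reportAndDefrag(ctx context.Context, cli *clientv3.Client, endpoint string) error {
        st, err := cli.Status(ctx, endpoint)
        if err != nil {
            return err
        }
        fmt.Printf("version=%s dbSize=%d leader=%x\n", st.Version, st.DbSize, st.Leader)
        _, err = cli.Defragment(ctx, endpoint)
        return err
    }
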
-
-service Auth {
- // AuthEnable enables authentication.
- rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
- option (google.api.http) = {
- post: "/v3/auth/enable"
- body: "*"
- };
- }
-
- // AuthDisable disables authentication.
- rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
- option (google.api.http) = {
- post: "/v3/auth/disable"
- body: "*"
- };
- }
-
- // AuthStatus displays authentication status.
- rpc AuthStatus(AuthStatusRequest) returns (AuthStatusResponse) {
- option (google.api.http) = {
- post: "/v3/auth/status"
- body: "*"
- };
- }
-
- // Authenticate processes an authenticate request.
- rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
- option (google.api.http) = {
- post: "/v3/auth/authenticate"
- body: "*"
- };
- }
-
- // UserAdd adds a new user. User name cannot be empty.
- rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
- option (google.api.http) = {
- post: "/v3/auth/user/add"
- body: "*"
- };
- }
-
- // UserGet gets detailed user information.
- rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
- option (google.api.http) = {
- post: "/v3/auth/user/get"
- body: "*"
- };
- }
-
- // UserList gets a list of all users.
- rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
- option (google.api.http) = {
- post: "/v3/auth/user/list"
- body: "*"
- };
- }
-
- // UserDelete deletes a specified user.
- rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
- option (google.api.http) = {
- post: "/v3/auth/user/delete"
- body: "*"
- };
- }
-
- // UserChangePassword changes the password of a specified user.
- rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
- option (google.api.http) = {
- post: "/v3/auth/user/changepw"
- body: "*"
- };
- }
-
- // UserGrantRole grants a role to a specified user.
- rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
- option (google.api.http) = {
- post: "/v3/auth/user/grant"
- body: "*"
- };
- }
-
- // UserRevokeRole revokes a role of a specified user.
- rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
- option (google.api.http) = {
- post: "/v3/auth/user/revoke"
- body: "*"
- };
- }
-
- // RoleAdd adds a new role. Role name cannot be empty.
- rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
- option (google.api.http) = {
- post: "/v3/auth/role/add"
- body: "*"
- };
- }
-
- // RoleGet gets detailed role information.
- rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
- option (google.api.http) = {
- post: "/v3/auth/role/get"
- body: "*"
- };
- }
-
- // RoleList gets a list of all roles.
- rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
- option (google.api.http) = {
- post: "/v3/auth/role/list"
- body: "*"
- };
- }
-
- // RoleDelete deletes a specified role.
- rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
- option (google.api.http) = {
- post: "/v3/auth/role/delete"
- body: "*"
- };
- }
-
- // RoleGrantPermission grants a permission of a specified key or range to a specified role.
- rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
- option (google.api.http) = {
- post: "/v3/auth/role/grant"
- body: "*"
- };
- }
-
- // RoleRevokePermission revokes a key or range permission of a specified role.
- rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
- option (google.api.http) = {
- post: "/v3/auth/role/revoke"
- body: "*"
- };
- }
-}
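
A sketch of the usual auth bootstrap order with the assumed Go client: create the root user and grant it the root role, set up a restricted role and user, then enable authentication. User names, passwords, and the bootstrapAuth helper are illustrative, and the exact prerequisites for enabling auth depend on the server version:

    package example

    import (
        "context"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // bootstrapAuth adds a root user, a read-only role scoped to the "app/" prefix,
    // a user holding that role, and finally turns authentication on.
    func bootstrapAuth(ctx context.Context, cli *clientv3.Client) error {
        if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
            return err
        }
        // The root user must hold the root role before auth can be enabled.
        if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
            return err
        }
        if _, err := cli.RoleAdd(ctx, "app-read"); err != nil {
            return err
        }
        // Grant read-only access to every key under "app/".
        if _, err := cli.RoleGrantPermission(ctx, "app-read", "app/",
            clientv3.GetPrefixRangeEnd("app/"), clientv3.PermissionType(clientv3.PermRead)); err != nil {
            return err
        }
        if _, err := cli.UserAdd(ctx, "reader", "readerpw"); err != nil {
            return err
        }
        if _, err := cli.UserGrantRole(ctx, "reader", "app-read"); err != nil {
            return err
        }
        _, err := cli.AuthEnable(ctx)
        return err
    }
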
-
-message ResponseHeader {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // cluster_id is the ID of the cluster which sent the response.
- uint64 cluster_id = 1;
- // member_id is the ID of the member which sent the response.
- uint64 member_id = 2;
- // revision is the key-value store revision when the request was applied, and it's
- // unset (so 0) for calls that do not interact with the key-value store.
- // For watch progress responses, the header.revision indicates progress. All future events
- // received in this stream are guaranteed to have a higher revision number than the
- // header.revision number.
- int64 revision = 3;
- // raft_term is the raft term when the request was applied.
- uint64 raft_term = 4;
-}
-
-message RangeRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- enum SortOrder {
- option (versionpb.etcd_version_enum) = "3.0";
- NONE = 0; // default, no sorting
- ASCEND = 1; // lowest target value first
- DESCEND = 2; // highest target value first
- }
- enum SortTarget {
- option (versionpb.etcd_version_enum) = "3.0";
- KEY = 0;
- VERSION = 1;
- CREATE = 2;
- MOD = 3;
- VALUE = 4;
- }
-
- // key is the first key for the range. If range_end is not given, the request only looks up key.
- bytes key = 1;
- // range_end is the upper bound on the requested range [key, range_end).
- // If range_end is '\0', the range is all keys >= key.
- // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
- // then the range request gets all keys prefixed with key.
- // If both key and range_end are '\0', then the range request returns all keys.
- bytes range_end = 2;
- // limit is a limit on the number of keys returned for the request. When limit is set to 0,
- // it is treated as no limit.
- int64 limit = 3;
- // revision is the point-in-time of the key-value store to use for the range.
- // If revision is less than or equal to zero, the range is over the newest key-value store.
- // If the revision has been compacted, ErrCompacted is returned as a response.
- int64 revision = 4;
-
- // sort_order is the order for returned sorted results.
- SortOrder sort_order = 5;
-
- // sort_target is the key-value field to use for sorting.
- SortTarget sort_target = 6;
-
- // serializable sets the range request to use serializable member-local reads.
- // Range requests are linearizable by default; linearizable requests have higher
- // latency and lower throughput than serializable requests but reflect the current
- // consensus of the cluster. For better performance, in exchange for possible stale reads,
- // a serializable range request is served locally without needing to reach consensus
- // with other nodes in the cluster.
- bool serializable = 7;
-
- // keys_only when set returns only the keys and not the values.
- bool keys_only = 8;
-
- // count_only when set returns only the count of the keys in the range.
- bool count_only = 9;
-
- // min_mod_revision is the lower bound for returned key mod revisions; all keys with
- // lesser mod revisions will be filtered away.
- int64 min_mod_revision = 10 [(versionpb.etcd_version_field)="3.1"];
-
- // max_mod_revision is the upper bound for returned key mod revisions; all keys with
- // greater mod revisions will be filtered away.
- int64 max_mod_revision = 11 [(versionpb.etcd_version_field)="3.1"];
-
- // min_create_revision is the lower bound for returned key create revisions; all keys with
- // lesser create revisions will be filtered away.
- int64 min_create_revision = 12 [(versionpb.etcd_version_field)="3.1"];
-
- // max_create_revision is the upper bound for returned key create revisions; all keys with
- // greater create revisions will be filtered away.
- int64 max_create_revision = 13 [(versionpb.etcd_version_field)="3.1"];
-}
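
Most of these RangeRequest fields surface as per-call options in the assumed Go client. A sketch of a prefix range that also sets sorting, a limit, and a serializable (member-local) read; listPrefix is an illustrative helper name:

    package example

    import (
        "context"
        "fmt"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // listPrefix issues a RangeRequest over a prefix: range_end is derived from
    // the prefix, results are sorted by key, capped at 100, and served from the
    // local member rather than through consensus.
    func listPrefix(ctx context.Context, cli *clientv3.Client, prefix string) error {
        resp, err := cli.Get(ctx, prefix,
            clientv3.WithPrefix(), // range_end = prefix plus one ("app/" -> "app0")
            clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend), // sort_target / sort_order
            clientv3.WithLimit(100),      // limit
            clientv3.WithSerializable(),  // serializable member-local read
        )
        if err != nil {
            return err
        }
        fmt.Println("count:", resp.Count, "more:", resp.More)
        for _, kv := range resp.Kvs {
            fmt.Printf("%s\n", kv.Key)
        }
        return nil
    }
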
-
-message RangeResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // kvs is the list of key-value pairs matched by the range request.
- // kvs is empty when count is requested.
- repeated mvccpb.KeyValue kvs = 2;
- // more indicates if there are more keys to return in the requested range.
- bool more = 3;
- // count is set to the number of keys within the range when requested.
- int64 count = 4;
-}
-
-message PutRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // key is the key, in bytes, to put into the key-value store.
- bytes key = 1;
- // value is the value, in bytes, to associate with the key in the key-value store.
- bytes value = 2;
- // lease is the lease ID to associate with the key in the key-value store. A lease
- // value of 0 indicates no lease.
- int64 lease = 3;
-
- // If prev_kv is set, etcd gets the previous key-value pair before changing it.
- // The previous key-value pair will be returned in the put response.
- bool prev_kv = 4 [(versionpb.etcd_version_field)="3.1"];
-
- // If ignore_value is set, etcd updates the key using its current value.
- // Returns an error if the key does not exist.
- bool ignore_value = 5 [(versionpb.etcd_version_field)="3.2"];
-
- // If ignore_lease is set, etcd updates the key using its current lease.
- // Returns an error if the key does not exist.
- bool ignore_lease = 6 [(versionpb.etcd_version_field)="3.2"];
-}
-
-message PutResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // if prev_kv is set in the request, the previous key-value pair will be returned.
- mvccpb.KeyValue prev_kv = 2 [(versionpb.etcd_version_field)="3.1"];
-}
-
-message DeleteRangeRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // key is the first key to delete in the range.
- bytes key = 1;
- // range_end is the key following the last key to delete for the range [key, range_end).
- // If range_end is not given, the range is defined to contain only the key argument.
- // If range_end is the given key plus one (the next key in lexical byte order), then the range
- // is all the keys with the prefix (the given key).
- // If range_end is '\0', the range is all keys greater than or equal to the key argument.
- bytes range_end = 2;
-
- // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
- // The previous key-value pairs will be returned in the delete response.
- bool prev_kv = 3 [(versionpb.etcd_version_field)="3.1"];
-}
-
-message DeleteRangeResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // deleted is the number of keys deleted by the delete range request.
- int64 deleted = 2;
- // if prev_kv is set in the request, the previous key-value pairs will be returned.
- repeated mvccpb.KeyValue prev_kvs = 3 [(versionpb.etcd_version_field)="3.1"];
-}
-
-message RequestOp {
- option (versionpb.etcd_version_msg) = "3.0";
- // request is a union of request types accepted by a transaction.
- oneof request {
- RangeRequest request_range = 1;
- PutRequest request_put = 2;
- DeleteRangeRequest request_delete_range = 3;
- TxnRequest request_txn = 4 [(versionpb.etcd_version_field)="3.3"];
- }
-}
-
-message ResponseOp {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // response is a union of response types returned by a transaction.
- oneof response {
- RangeResponse response_range = 1;
- PutResponse response_put = 2;
- DeleteRangeResponse response_delete_range = 3;
- TxnResponse response_txn = 4 [(versionpb.etcd_version_field)="3.3"];
- }
-}
-
-message Compare {
- option (versionpb.etcd_version_msg) = "3.0";
-
- enum CompareResult {
- option (versionpb.etcd_version_enum) = "3.0";
-
- EQUAL = 0;
- GREATER = 1;
- LESS = 2;
- NOT_EQUAL = 3 [(versionpb.etcd_version_enum_value)="3.1"];
- }
- enum CompareTarget {
- option (versionpb.etcd_version_enum) = "3.0";
-
- VERSION = 0;
- CREATE = 1;
- MOD = 2;
- VALUE = 3;
- LEASE = 4 [(versionpb.etcd_version_enum_value)="3.3"];
- }
- // result is the logical comparison operation for this comparison.
- CompareResult result = 1;
- // target is the key-value field to inspect for the comparison.
- CompareTarget target = 2;
- // key is the subject key for the comparison operation.
- bytes key = 3;
- oneof target_union {
- // version is the version of the given key
- int64 version = 4;
- // create_revision is the creation revision of the given key
- int64 create_revision = 5;
- // mod_revision is the last modified revision of the given key.
- int64 mod_revision = 6;
- // value is the value of the given key, in bytes.
- bytes value = 7;
- // lease is the lease id of the given key.
- int64 lease = 8 [(versionpb.etcd_version_field)="3.3"];
- // leave room for more target_union field tags, jump to 64
- }
-
- // range_end compares the given target to all keys in the range [key, range_end).
- // See RangeRequest for more details on key ranges.
- bytes range_end = 64 [(versionpb.etcd_version_field)="3.3"];
- // TODO: fill out with most of the rest of RangeRequest fields when needed.
-}
-
-// From google paxosdb paper:
-// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
-// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
-// and consists of three components:
-// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
-// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
-// may apply to the same or different entries in the database. All tests in the guard are applied and
-// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
-// it executes f op (see item 3 below).
-// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
-// lookup operation, and applies to a single database entry. Two different operations in the list may apply
- // to the same or different entries in the database. These operations are executed if guard
- // evaluates to true.
-// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
-message TxnRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // compare is a list of predicates representing a conjunction of terms.
- // If the comparisons succeed, then the success requests will be processed in order,
- // and the response will contain their respective responses in order.
- // If the comparisons fail, then the failure requests will be processed in order,
- // and the response will contain their respective responses in order.
- repeated Compare compare = 1;
- // success is a list of requests which will be applied when compare evaluates to true.
- repeated RequestOp success = 2;
- // failure is a list of requests which will be applied when compare evaluates to false.
- repeated RequestOp failure = 3;
-}
-
-message TxnResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // succeeded is set to true if the compare evaluated to true, and false otherwise.
- bool succeeded = 2;
- // responses is a list of responses corresponding to the results from applying
- // success if succeeded is true or failure if succeeded is false.
- repeated ResponseOp responses = 3;
-}
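
A sketch of the guard/t_op/f_op pattern above as a compare-and-swap with the assumed Go client: the If clause is the guard, Then runs when it holds, Else runs when it does not (compareAndSwap is an illustrative helper name):

    package example

    import (
        "context"
        "fmt"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // compareAndSwap writes newVal only if key still holds oldVal; otherwise it
    // reads back whatever the key currently contains.
    func compareAndSwap(ctx context.Context, cli *clientv3.Client, key, oldVal, newVal string) error {
        txn, err := cli.Txn(ctx).
            If(clientv3.Compare(clientv3.Value(key), "=", oldVal)). // guard
            Then(clientv3.OpPut(key, newVal)).                      // t_op
            Else(clientv3.OpGet(key)).                              // f_op
            Commit()
        if err != nil {
            return err
        }
        if txn.Succeeded {
            fmt.Println("swap applied")
        } else if kvs := txn.Responses[0].GetResponseRange().Kvs; len(kvs) > 0 {
            fmt.Println("guard failed, current value:", string(kvs[0].Value))
        } else {
            fmt.Println("guard failed, key no longer exists")
        }
        return nil
    }
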
-
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-message CompactionRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // revision is the key-value store revision for the compaction operation.
- int64 revision = 1;
- // physical is set so the RPC will wait until the compaction is physically
- // applied to the local database such that compacted entries are totally
- // removed from the backend database.
- bool physical = 2;
-}
-
-message CompactionResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message HashRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-}
-
-message HashKVRequest {
- option (versionpb.etcd_version_msg) = "3.3";
- // revision is the key-value store revision for the hash operation.
- int64 revision = 1;
-}
-
-message HashKVResponse {
- option (versionpb.etcd_version_msg) = "3.3";
-
- ResponseHeader header = 1;
- // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
- uint32 hash = 2;
- // compact_revision is the compacted revision of key-value store when hash begins.
- int64 compact_revision = 3;
- // hash_revision is the revision up to which the hash is calculated.
- int64 hash_revision = 4 [(versionpb.etcd_version_field)="3.6"];
-}
-
-message HashResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // hash is the hash value computed from the responding member's KV's backend.
- uint32 hash = 2;
-}
-
-message SnapshotRequest {
- option (versionpb.etcd_version_msg) = "3.3";
-}
-
-message SnapshotResponse {
- option (versionpb.etcd_version_msg) = "3.3";
-
- // header has the current key-value store information. The first header in the snapshot
- // stream indicates the point in time of the snapshot.
- ResponseHeader header = 1;
-
- // remaining_bytes is the number of blob bytes to be sent after this message
- uint64 remaining_bytes = 2;
-
- // blob contains the next chunk of the snapshot in the snapshot stream.
- bytes blob = 3;
-
- // local version of the server that created the snapshot.
- // In a cluster whose members run different binary versions, each member can return a different result.
- // Informs which etcd server version should be used when restoring the snapshot.
- string version = 4 [(versionpb.etcd_version_field)="3.6"];
-}
-
-message WatchRequest {
- option (versionpb.etcd_version_msg) = "3.0";
- // request_union is a request to either create a new watcher or cancel an existing watcher.
- oneof request_union {
- WatchCreateRequest create_request = 1;
- WatchCancelRequest cancel_request = 2;
- WatchProgressRequest progress_request = 3 [(versionpb.etcd_version_field)="3.4"];
- }
-}
-
-message WatchCreateRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // key is the key to register for watching.
- bytes key = 1;
-
- // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
- // only the key argument is watched. If range_end is equal to '\0', all keys greater than
- // or equal to the key argument are watched.
- // If range_end is the given key plus one (the next key in lexical byte order),
- // then all keys with the prefix (the given key) will be watched.
- bytes range_end = 2;
-
- // start_revision is an optional revision to watch from (inclusive). An unset start_revision means "now".
- int64 start_revision = 3;
-
- // progress_notify is set so that the etcd server will periodically send a WatchResponse with
- // no events to the new watcher if there are no recent events. It is useful when clients
- // wish to recover a disconnected watcher starting from a recent known revision.
- // The etcd server may decide how often it will send notifications based on current load.
- bool progress_notify = 4;
-
- enum FilterType {
- option (versionpb.etcd_version_enum) = "3.1";
-
- // filter out put event.
- NOPUT = 0;
- // filter out delete event.
- NODELETE = 1;
- }
-
- // filters filter the events on the server side before they are sent back to the watcher.
- repeated FilterType filters = 5 [(versionpb.etcd_version_field)="3.1"];
-
- // If prev_kv is set, created watcher gets the previous KV before the event happens.
- // If the previous KV is already compacted, nothing will be returned.
- bool prev_kv = 6 [(versionpb.etcd_version_field)="3.1"];
-
- // If watch_id is provided and non-zero, it will be assigned to this watcher.
- // Since creating a watcher in etcd is not a synchronous operation,
- // this can be used to ensure that ordering is correct when creating multiple
- // watchers on the same stream. Creating a watcher with an ID already in
- // use on the stream will cause an error to be returned.
- int64 watch_id = 7 [(versionpb.etcd_version_field)="3.4"];
-
- // fragment enables splitting large revisions into multiple watch responses.
- bool fragment = 8 [(versionpb.etcd_version_field)="3.4"];
-}
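
Several of these WatchCreateRequest fields also map onto watch options in the assumed Go client. A sketch that sets a prefix range, an explicit start_revision, prev_kv, and progress notifications (watchFromRevision is an illustrative helper name):

    package example

    import (
        "context"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // watchFromRevision opens a watcher over prefix with several create-request
    // fields set; the caller ranges over the returned channel.
    func watchFromRevision(ctx context.Context, cli *clientv3.Client, prefix string, rev int64) clientv3.WatchChan {
        return cli.Watch(ctx, prefix,
            clientv3.WithPrefix(),         // range_end derived from the prefix
            clientv3.WithRev(rev),         // start_revision (inclusive)
            clientv3.WithPrevKV(),         // prev_kv: include the previous key-value in events
            clientv3.WithProgressNotify(), // progress_notify: empty responses advance header.revision
        )
    }
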
-
-message WatchCancelRequest {
- option (versionpb.etcd_version_msg) = "3.1";
- // watch_id is the watcher id to cancel so that no more events are transmitted.
- int64 watch_id = 1 [(versionpb.etcd_version_field)="3.1"];
-}
-
- // Requests that a watch stream progress status be sent in the watch response stream as soon as
-// possible.
-message WatchProgressRequest {
- option (versionpb.etcd_version_msg) = "3.4";
-}
-
-message WatchResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // watch_id is the ID of the watcher that corresponds to the response.
- int64 watch_id = 2;
-
- // created is set to true if the response is for a create watch request.
- // The client should record the watch_id and expect to receive events for
- // the created watcher from the same stream.
- // All events sent to the created watcher will attach with the same watch_id.
- bool created = 3;
-
- // canceled is set to true if the response is for a cancel watch request.
- // No further events will be sent to the canceled watcher.
- bool canceled = 4;
-
- // compact_revision is set to the minimum index if a watcher tries to watch
- // at a compacted index.
- //
- // This happens when creating a watcher at a compacted revision or the watcher cannot
- // catch up with the progress of the key-value store.
- //
- // The client should treat the watcher as canceled and should not try to create any
- // watcher with the same start_revision again.
- int64 compact_revision = 5;
-
- // cancel_reason indicates the reason for canceling the watcher.
- string cancel_reason = 6 [(versionpb.etcd_version_field)="3.4"];
-
- // fragment is true if a large watch response was split over multiple responses.
- bool fragment = 7 [(versionpb.etcd_version_field)="3.4"];
-
- repeated mvccpb.Event events = 11;
-}
-
-message LeaseGrantRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
- int64 TTL = 1;
- // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
- int64 ID = 2;
-}
-
-message LeaseGrantResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // ID is the lease ID for the granted lease.
- int64 ID = 2;
- // TTL is the server chosen lease time-to-live in seconds.
- int64 TTL = 3;
- string error = 4;
-}
-
-message LeaseRevokeRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
- int64 ID = 1;
-}
-
-message LeaseRevokeResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message LeaseCheckpoint {
- option (versionpb.etcd_version_msg) = "3.4";
-
- // ID is the lease ID to checkpoint.
- int64 ID = 1;
-
- // Remaining_TTL is the remaining time until expiry of the lease.
- int64 remaining_TTL = 2;
-}
-
-message LeaseCheckpointRequest {
- option (versionpb.etcd_version_msg) = "3.4";
-
- repeated LeaseCheckpoint checkpoints = 1;
-}
-
-message LeaseCheckpointResponse {
- option (versionpb.etcd_version_msg) = "3.4";
-
- ResponseHeader header = 1;
-}
-
-message LeaseKeepAliveRequest {
- option (versionpb.etcd_version_msg) = "3.0";
- // ID is the lease ID for the lease to keep alive.
- int64 ID = 1;
-}
-
-message LeaseKeepAliveResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // ID is the lease ID from the keep alive request.
- int64 ID = 2;
- // TTL is the new time-to-live for the lease.
- int64 TTL = 3;
-}
-
-message LeaseTimeToLiveRequest {
- option (versionpb.etcd_version_msg) = "3.1";
- // ID is the lease ID for the lease.
- int64 ID = 1;
- // keys is true to query all the keys attached to this lease.
- bool keys = 2;
-}
-
-message LeaseTimeToLiveResponse {
- option (versionpb.etcd_version_msg) = "3.1";
-
- ResponseHeader header = 1;
- // ID is the lease ID from the time-to-live request.
- int64 ID = 2;
- // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
- int64 TTL = 3;
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- int64 grantedTTL = 4;
- // Keys is the list of keys attached to this lease.
- repeated bytes keys = 5;
-}
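
A sketch of querying lease metadata with the assumed Go client; WithAttachedKeys asks the server to include the keys bound to the lease, and a TTL of -1 indicates the lease no longer exists (inspectLease is an illustrative helper name):

    package example

    import (
        "context"
        "fmt"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // inspectLease reports the remaining TTL of a lease and the keys attached to it.
    func inspectLease(ctx context.Context, cli *clientv3.Client, id clientv3.LeaseID) error {
        resp, err := cli.TimeToLive(ctx, id, clientv3.WithAttachedKeys())
        if err != nil {
            return err
        }
        if resp.TTL == -1 {
            fmt.Println("lease already expired or revoked")
            return nil
        }
        fmt.Printf("TTL=%ds granted=%ds keys=%d\n", resp.TTL, resp.GrantedTTL, len(resp.Keys))
        return nil
    }
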
-
-message LeaseLeasesRequest {
- option (versionpb.etcd_version_msg) = "3.3";
-}
-
-message LeaseStatus {
- option (versionpb.etcd_version_msg) = "3.3";
-
- int64 ID = 1;
- // TODO: int64 TTL = 2;
-}
-
-message LeaseLeasesResponse {
- option (versionpb.etcd_version_msg) = "3.3";
-
- ResponseHeader header = 1;
- repeated LeaseStatus leases = 2;
-}
-
-message Member {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // ID is the member ID for this member.
- uint64 ID = 1;
- // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
- string name = 2;
- // peerURLs is the list of URLs the member exposes to the cluster for communication.
- repeated string peerURLs = 3;
- // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
- repeated string clientURLs = 4;
- // isLearner indicates if the member is a raft learner.
- bool isLearner = 5 [(versionpb.etcd_version_field)="3.4"];
-}
-
-message MemberAddRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // peerURLs is the list of URLs the added member will use to communicate with the cluster.
- repeated string peerURLs = 1;
- // isLearner indicates if the added member is a raft learner.
- bool isLearner = 2 [(versionpb.etcd_version_field)="3.4"];
-}
-
-message MemberAddResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // member is the member information for the added member.
- Member member = 2;
- // members is a list of all members after adding the new member.
- repeated Member members = 3;
-}
-
-message MemberRemoveRequest {
- option (versionpb.etcd_version_msg) = "3.0";
- // ID is the member ID of the member to remove.
- uint64 ID = 1;
-}
-
-message MemberRemoveResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // members is a list of all members after removing the member.
- repeated Member members = 2;
-}
-
-message MemberUpdateRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // ID is the member ID of the member to update.
- uint64 ID = 1;
- // peerURLs is the new list of URLs the member will use to communicate with the cluster.
- repeated string peerURLs = 2;
-}
-
-message MemberUpdateResponse{
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // members is a list of all members after updating the member.
- repeated Member members = 2 [(versionpb.etcd_version_field)="3.1"];
-}
-
-message MemberListRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- bool linearizable = 1 [(versionpb.etcd_version_field)="3.5"];
-}
-
-message MemberListResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // members is a list of all members associated with the cluster.
- repeated Member members = 2;
-}
-
-message MemberPromoteRequest {
- option (versionpb.etcd_version_msg) = "3.4";
- // ID is the member ID of the member to promote.
- uint64 ID = 1;
-}
-
-message MemberPromoteResponse {
- option (versionpb.etcd_version_msg) = "3.4";
-
- ResponseHeader header = 1;
- // members is a list of all members after promoting the member.
- repeated Member members = 2;
-}
-
-message DefragmentRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-}
-
-message DefragmentResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message MoveLeaderRequest {
- option (versionpb.etcd_version_msg) = "3.3";
- // targetID is the node ID for the new leader.
- uint64 targetID = 1;
-}
-
-message MoveLeaderResponse {
- option (versionpb.etcd_version_msg) = "3.3";
-
- ResponseHeader header = 1;
-}
-
-enum AlarmType {
- option (versionpb.etcd_version_enum) = "3.0";
-
- NONE = 0; // default, used to query if any alarm is active
- NOSPACE = 1; // space quota is exhausted
- CORRUPT = 2 [(versionpb.etcd_version_enum_value)="3.3"]; // kv store corruption detected
-}
-
-message AlarmRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- enum AlarmAction {
- option (versionpb.etcd_version_enum) = "3.0";
-
- GET = 0;
- ACTIVATE = 1;
- DEACTIVATE = 2;
- }
- // action is the kind of alarm request to issue. The action
- // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
- // raised alarm.
- AlarmAction action = 1;
- // memberID is the ID of the member associated with the alarm. If memberID is 0, the
- // alarm request covers all members.
- uint64 memberID = 2;
- // alarm is the type of alarm to consider for this request.
- AlarmType alarm = 3;
-}
-
-message AlarmMember {
- option (versionpb.etcd_version_msg) = "3.0";
- // memberID is the ID of the member associated with the raised alarm.
- uint64 memberID = 1;
- // alarm is the type of alarm which has been raised.
- AlarmType alarm = 2;
-}
-
-message AlarmResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // alarms is a list of alarms associated with the alarm request.
- repeated AlarmMember alarms = 2;
-}
-
-message DowngradeRequest {
- option (versionpb.etcd_version_msg) = "3.5";
-
- enum DowngradeAction {
- option (versionpb.etcd_version_enum) = "3.5";
-
- VALIDATE = 0;
- ENABLE = 1;
- CANCEL = 2;
- }
-
- // action is the kind of downgrade request to issue. The action may
- // VALIDATE the target version, ENABLE the downgrade to the target version,
- // or CANCEL the current downgrade job.
- DowngradeAction action = 1;
- // version is the target version to downgrade.
- string version = 2;
-}
-
-message DowngradeResponse {
- option (versionpb.etcd_version_msg) = "3.5";
-
- ResponseHeader header = 1;
- // version is the current cluster version.
- string version = 2;
-}
-
-message StatusRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-}
-
-message StatusResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // version is the cluster protocol version used by the responding member.
- string version = 2;
- // dbSize is the size of the backend database physically allocated, in bytes, of the responding member.
- int64 dbSize = 3;
- // leader is the member ID which the responding member believes is the current leader.
- uint64 leader = 4;
- // raftIndex is the current raft committed index of the responding member.
- uint64 raftIndex = 5;
- // raftTerm is the current raft term of the responding member.
- uint64 raftTerm = 6;
- // raftAppliedIndex is the current raft applied index of the responding member.
- uint64 raftAppliedIndex = 7 [(versionpb.etcd_version_field)="3.4"];
- // errors contains alarm/health information and status.
- repeated string errors = 8 [(versionpb.etcd_version_field)="3.4"];
- // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
- int64 dbSizeInUse = 9 [(versionpb.etcd_version_field)="3.4"];
- // isLearner indicates if the member is a raft learner.
- bool isLearner = 10 [(versionpb.etcd_version_field)="3.4"];
- // storageVersion is the version of the db file. It might be updated with a delay relative to the target cluster version.
- string storageVersion = 11 [(versionpb.etcd_version_field)="3.6"];
-}
-
-message AuthEnableRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-}
-
-message AuthDisableRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-}
-
-message AuthStatusRequest {
- option (versionpb.etcd_version_msg) = "3.5";
-}
-
-message AuthenticateRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- string name = 1;
- string password = 2;
-}
-
-message AuthUserAddRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- string name = 1;
- string password = 2;
- authpb.UserAddOptions options = 3 [(versionpb.etcd_version_field)="3.4"];
- string hashedPassword = 4 [(versionpb.etcd_version_field)="3.5"];
-}
-
-message AuthUserGetRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- string name = 1;
-}
-
-message AuthUserDeleteRequest {
- option (versionpb.etcd_version_msg) = "3.0";
- // name is the name of the user to delete.
- string name = 1;
-}
-
-message AuthUserChangePasswordRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // name is the name of the user whose password is being changed.
- string name = 1;
- // password is the new password for the user. Note that this field will be removed in the API layer.
- string password = 2;
- // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
- string hashedPassword = 3 [(versionpb.etcd_version_field)="3.5"];
-}
-
-message AuthUserGrantRoleRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // user is the name of the user which should be granted a given role.
- string user = 1;
- // role is the name of the role to grant to the user.
- string role = 2;
-}
-
-message AuthUserRevokeRoleRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- string name = 1;
- string role = 2;
-}
-
-message AuthRoleAddRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // name is the name of the role to add to the authentication system.
- string name = 1;
-}
-
-message AuthRoleGetRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- string role = 1;
-}
-
-message AuthUserListRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-}
-
-message AuthRoleListRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-}
-
-message AuthRoleDeleteRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- string role = 1;
-}
-
-message AuthRoleGrantPermissionRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- // name is the name of the role which will be granted the permission.
- string name = 1;
- // perm is the permission to grant to the role.
- authpb.Permission perm = 2;
-}
-
-message AuthRoleRevokePermissionRequest {
- option (versionpb.etcd_version_msg) = "3.0";
-
- string role = 1;
- bytes key = 2;
- bytes range_end = 3;
-}
-
-message AuthEnableResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthDisableResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthStatusResponse {
- option (versionpb.etcd_version_msg) = "3.5";
-
- ResponseHeader header = 1;
- bool enabled = 2;
- // authRevision is the current revision of the auth store.
- uint64 authRevision = 3;
-}
-
-message AuthenticateResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
- // token is an authorized token that can be used in succeeding RPCs
- string token = 2;
-}
-
-message AuthUserAddResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthUserGetResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-
- repeated string roles = 2;
-}
-
-message AuthUserDeleteResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthUserChangePasswordResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthUserGrantRoleResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthUserRevokeRoleResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthRoleAddResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthRoleGetResponse {
- ResponseHeader header = 1 [(versionpb.etcd_version_field)="3.0"];
-
- repeated authpb.Permission perm = 2 [(versionpb.etcd_version_field)="3.0"];
-}
-
-message AuthRoleListResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-
- repeated string roles = 2;
-}
-
-message AuthUserListResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-
- repeated string users = 2;
-}
-
-message AuthRoleDeleteResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthRoleGrantPermissionResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
-
-message AuthRoleRevokePermissionResponse {
- option (versionpb.etcd_version_msg) = "3.0";
-
- ResponseHeader header = 1;
-}
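
The auth request/response messages above map onto the clientv3 Auth helpers. Below is a minimal, hedged sketch of the typical bootstrap sequence (add a root user and role, grant a key-range permission, then enable auth); the endpoint, names, passwords, and key range are placeholders, not values from this repository.

```go
package main

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// AuthUserAddRequest / AuthRoleAddRequest
	if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
		panic(err)
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		panic(err)
	}
	// AuthRoleGrantPermissionRequest: grant read/write on the half-open range ["foo", "fop").
	if _, err := cli.RoleGrantPermission(ctx, "root", "foo", "fop",
		clientv3.PermissionType(clientv3.PermReadWrite)); err != nil {
		panic(err)
	}
	// AuthUserGrantRoleRequest, then AuthEnableRequest.
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		panic(err)
	}
	if _, err := cli.AuthEnable(ctx); err != nil {
		panic(err)
	}
}
```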
diff --git a/api/go.mod b/api/go.mod
deleted file mode 100644
index b2ee57fbb37..00000000000
--- a/api/go.mod
+++ /dev/null
@@ -1,33 +0,0 @@
-module go.etcd.io/etcd/api/v3
-
-go 1.19
-
-require (
- github.com/coreos/go-semver v0.3.1
- github.com/gogo/protobuf v1.3.2
- github.com/golang/protobuf v1.5.2
- github.com/grpc-ecosystem/grpc-gateway v1.16.0
- github.com/stretchr/testify v1.8.1
- google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1
- google.golang.org/grpc v1.51.0
-)
-
-require (
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- golang.org/x/net v0.4.0 // indirect
- golang.org/x/sys v0.3.0 // indirect
- golang.org/x/text v0.5.0 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
-)
-
-// Bad imports sometimes cause attempts to pull in that code.
-// These replace directives make the resulting error more explicit.
-replace (
- go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/api/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/pkg/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
-)
diff --git a/api/go.sum b/api/go.sum
deleted file mode 100644
index 00c60e35229..00000000000
--- a/api/go.sum
+++ /dev/null
@@ -1,168 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
-github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/api/membershippb/membership.pb.go b/api/membershippb/membership.pb.go
deleted file mode 100644
index 386185f0f8d..00000000000
--- a/api/membershippb/membership.pb.go
+++ /dev/null
@@ -1,1458 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: membership.proto
-
-package membershippb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
- _ "go.etcd.io/etcd/api/v3/versionpb"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// RaftAttributes represents the raft related attributes of an etcd member.
-type RaftAttributes struct {
- // peerURLs is the list of peers in the raft cluster.
- PeerUrls []string `protobuf:"bytes,1,rep,name=peer_urls,json=peerUrls,proto3" json:"peer_urls,omitempty"`
- // isLearner indicates if the member is raft learner.
- IsLearner bool `protobuf:"varint,2,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RaftAttributes) Reset() { *m = RaftAttributes{} }
-func (m *RaftAttributes) String() string { return proto.CompactTextString(m) }
-func (*RaftAttributes) ProtoMessage() {}
-func (*RaftAttributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_949fe0d019050ef5, []int{0}
-}
-func (m *RaftAttributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RaftAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RaftAttributes.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RaftAttributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RaftAttributes.Merge(m, src)
-}
-func (m *RaftAttributes) XXX_Size() int {
- return m.Size()
-}
-func (m *RaftAttributes) XXX_DiscardUnknown() {
- xxx_messageInfo_RaftAttributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RaftAttributes proto.InternalMessageInfo
-
-// Attributes represents all the non-raft related attributes of an etcd member.
-type Attributes struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- ClientUrls []string `protobuf:"bytes,2,rep,name=client_urls,json=clientUrls,proto3" json:"client_urls,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Attributes) Reset() { *m = Attributes{} }
-func (m *Attributes) String() string { return proto.CompactTextString(m) }
-func (*Attributes) ProtoMessage() {}
-func (*Attributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_949fe0d019050ef5, []int{1}
-}
-func (m *Attributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Attributes.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Attributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Attributes.Merge(m, src)
-}
-func (m *Attributes) XXX_Size() int {
- return m.Size()
-}
-func (m *Attributes) XXX_DiscardUnknown() {
- xxx_messageInfo_Attributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Attributes proto.InternalMessageInfo
-
-type Member struct {
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- RaftAttributes *RaftAttributes `protobuf:"bytes,2,opt,name=raft_attributes,json=raftAttributes,proto3" json:"raft_attributes,omitempty"`
- MemberAttributes *Attributes `protobuf:"bytes,3,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Member) Reset() { *m = Member{} }
-func (m *Member) String() string { return proto.CompactTextString(m) }
-func (*Member) ProtoMessage() {}
-func (*Member) Descriptor() ([]byte, []int) {
- return fileDescriptor_949fe0d019050ef5, []int{2}
-}
-func (m *Member) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Member.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Member) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Member.Merge(m, src)
-}
-func (m *Member) XXX_Size() int {
- return m.Size()
-}
-func (m *Member) XXX_DiscardUnknown() {
- xxx_messageInfo_Member.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Member proto.InternalMessageInfo
-
-type ClusterVersionSetRequest struct {
- Ver string `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ClusterVersionSetRequest) Reset() { *m = ClusterVersionSetRequest{} }
-func (m *ClusterVersionSetRequest) String() string { return proto.CompactTextString(m) }
-func (*ClusterVersionSetRequest) ProtoMessage() {}
-func (*ClusterVersionSetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_949fe0d019050ef5, []int{3}
-}
-func (m *ClusterVersionSetRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterVersionSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ClusterVersionSetRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ClusterVersionSetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterVersionSetRequest.Merge(m, src)
-}
-func (m *ClusterVersionSetRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterVersionSetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterVersionSetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterVersionSetRequest proto.InternalMessageInfo
-
-type ClusterMemberAttrSetRequest struct {
- Member_ID uint64 `protobuf:"varint,1,opt,name=member_ID,json=memberID,proto3" json:"member_ID,omitempty"`
- MemberAttributes *Attributes `protobuf:"bytes,2,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ClusterMemberAttrSetRequest) Reset() { *m = ClusterMemberAttrSetRequest{} }
-func (m *ClusterMemberAttrSetRequest) String() string { return proto.CompactTextString(m) }
-func (*ClusterMemberAttrSetRequest) ProtoMessage() {}
-func (*ClusterMemberAttrSetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_949fe0d019050ef5, []int{4}
-}
-func (m *ClusterMemberAttrSetRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterMemberAttrSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ClusterMemberAttrSetRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ClusterMemberAttrSetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterMemberAttrSetRequest.Merge(m, src)
-}
-func (m *ClusterMemberAttrSetRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterMemberAttrSetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterMemberAttrSetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterMemberAttrSetRequest proto.InternalMessageInfo
-
-type DowngradeInfoSetRequest struct {
- Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
- Ver string `protobuf:"bytes,2,opt,name=ver,proto3" json:"ver,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DowngradeInfoSetRequest) Reset() { *m = DowngradeInfoSetRequest{} }
-func (m *DowngradeInfoSetRequest) String() string { return proto.CompactTextString(m) }
-func (*DowngradeInfoSetRequest) ProtoMessage() {}
-func (*DowngradeInfoSetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_949fe0d019050ef5, []int{5}
-}
-func (m *DowngradeInfoSetRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DowngradeInfoSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DowngradeInfoSetRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DowngradeInfoSetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DowngradeInfoSetRequest.Merge(m, src)
-}
-func (m *DowngradeInfoSetRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DowngradeInfoSetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DowngradeInfoSetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DowngradeInfoSetRequest proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*RaftAttributes)(nil), "membershippb.RaftAttributes")
- proto.RegisterType((*Attributes)(nil), "membershippb.Attributes")
- proto.RegisterType((*Member)(nil), "membershippb.Member")
- proto.RegisterType((*ClusterVersionSetRequest)(nil), "membershippb.ClusterVersionSetRequest")
- proto.RegisterType((*ClusterMemberAttrSetRequest)(nil), "membershippb.ClusterMemberAttrSetRequest")
- proto.RegisterType((*DowngradeInfoSetRequest)(nil), "membershippb.DowngradeInfoSetRequest")
-}
-
-func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) }
-
-var fileDescriptor_949fe0d019050ef5 = []byte{
- // 401 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xcd, 0xae, 0xd2, 0x40,
- 0x14, 0xbe, 0xd3, 0xde, 0xdc, 0xdb, 0x9e, 0x6b, 0x10, 0x27, 0x24, 0x36, 0xa0, 0xb5, 0x61, 0xc5,
- 0xaa, 0x24, 0x12, 0x36, 0xee, 0x54, 0x58, 0x60, 0xc4, 0xc5, 0x18, 0xdc, 0x92, 0x29, 0x1c, 0xb0,
- 0x49, 0x69, 0xeb, 0xcc, 0x14, 0xf7, 0x2e, 0x7d, 0x02, 0xdf, 0xc2, 0x95, 0xef, 0xc0, 0xd2, 0x47,
- 0x50, 0x7c, 0x11, 0xd3, 0x99, 0x42, 0x4b, 0x74, 0x75, 0x77, 0xa7, 0x5f, 0xcf, 0xf9, 0xfe, 0x5a,
- 0x68, 0xef, 0x70, 0x17, 0xa1, 0x90, 0x1f, 0xe3, 0x3c, 0xcc, 0x45, 0xa6, 0x32, 0xfa, 0xa0, 0x46,
- 0xf2, 0xa8, 0xdb, 0xd9, 0x66, 0xdb, 0x4c, 0xbf, 0x18, 0x96, 0x93, 0xd9, 0xe9, 0x06, 0xa8, 0x56,
- 0xeb, 0x21, 0xcf, 0xe3, 0xe1, 0x1e, 0x85, 0x8c, 0xb3, 0x34, 0x8f, 0x4e, 0x93, 0xd9, 0xe8, 0x2f,
- 0xa0, 0xc5, 0xf8, 0x46, 0xbd, 0x54, 0x4a, 0xc4, 0x51, 0xa1, 0x50, 0xd2, 0x1e, 0xb8, 0x39, 0xa2,
- 0x58, 0x16, 0x22, 0x91, 0x1e, 0x09, 0xec, 0x81, 0xcb, 0x9c, 0x12, 0x58, 0x88, 0x44, 0xd2, 0xa7,
- 0x00, 0xb1, 0x5c, 0x26, 0xc8, 0x45, 0x8a, 0xc2, 0xb3, 0x02, 0x32, 0x70, 0x98, 0x1b, 0xcb, 0xb7,
- 0x06, 0x78, 0x71, 0xfb, 0xe5, 0x87, 0x67, 0x8f, 0xc2, 0x71, 0xff, 0x0d, 0x40, 0x83, 0x92, 0xc2,
- 0x75, 0xca, 0x77, 0xe8, 0x91, 0x80, 0x0c, 0x5c, 0xa6, 0x67, 0xfa, 0x0c, 0xee, 0x56, 0x49, 0x8c,
- 0xa9, 0x32, 0x42, 0x96, 0x16, 0x02, 0x03, 0x95, 0x52, 0x35, 0xd7, 0x77, 0x02, 0x37, 0x73, 0x9d,
- 0x95, 0xb6, 0xc0, 0x9a, 0x4d, 0x34, 0xcd, 0x35, 0xb3, 0x66, 0x13, 0x3a, 0x85, 0x87, 0x82, 0x6f,
- 0xd4, 0x92, 0x9f, 0xb5, 0xb4, 0xa7, 0xbb, 0xe7, 0x4f, 0xc2, 0x66, 0x3b, 0xe1, 0x65, 0x44, 0xd6,
- 0x12, 0x97, 0x91, 0xa7, 0xf0, 0xc8, 0xac, 0x37, 0x89, 0x6c, 0x4d, 0xe4, 0x5d, 0x12, 0x35, 0x48,
- 0xaa, 0x2f, 0x52, 0x23, 0xb5, 0xe3, 0x31, 0x78, 0xaf, 0x93, 0x42, 0x2a, 0x14, 0x1f, 0x4c, 0xd9,
- 0xef, 0x51, 0x31, 0xfc, 0x54, 0xa0, 0x54, 0xb4, 0x0d, 0xf6, 0x1e, 0x45, 0x55, 0x45, 0x39, 0xd6,
- 0x67, 0x5f, 0x09, 0xf4, 0xaa, 0xbb, 0xf9, 0x99, 0xbb, 0x71, 0xda, 0x03, 0xb7, 0xb2, 0x79, 0x2e,
- 0xc1, 0x31, 0x80, 0xae, 0xe2, 0x3f, 0x19, 0xac, 0xfb, 0x67, 0x78, 0x07, 0x8f, 0x27, 0xd9, 0xe7,
- 0x74, 0x2b, 0xf8, 0x1a, 0x67, 0xe9, 0x26, 0x6b, 0xf8, 0xf0, 0xe0, 0x16, 0x53, 0x1e, 0x25, 0xb8,
- 0xd6, 0x2e, 0x1c, 0x76, 0x7a, 0x3c, 0x85, 0xb3, 0xfe, 0x0d, 0xf7, 0xaa, 0x73, 0xf8, 0xed, 0x5f,
- 0x1d, 0x8e, 0x3e, 0xf9, 0x79, 0xf4, 0xc9, 0xaf, 0xa3, 0x4f, 0xbe, 0xfd, 0xf1, 0xaf, 0xa2, 0x1b,
- 0xfd, 0x17, 0x8e, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xa3, 0xbd, 0xee, 0xdf, 0x02, 0x00,
- 0x00,
-}
-
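
The Marshal/Unmarshal methods below follow the usual gogo-protobuf pattern: Marshal sizes the message, then fills the buffer back-to-front via MarshalToSizedBuffer. As a rough usage sketch (all values are made up), a Member can be round-tripped like this:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/membershippb"
)

func main() {
	m := &membershippb.Member{
		ID: 0x1234, // arbitrary example ID
		RaftAttributes: &membershippb.RaftAttributes{
			PeerUrls:  []string{"http://127.0.0.1:2380"},
			IsLearner: false,
		},
		MemberAttributes: &membershippb.Attributes{
			Name:       "infra1",
			ClientUrls: []string{"http://127.0.0.1:2379"},
		},
	}

	// Marshal uses the generated MarshalToSizedBuffer defined below.
	b, err := m.Marshal()
	if err != nil {
		panic(err)
	}

	var out membershippb.Member
	if err := out.Unmarshal(b); err != nil {
		panic(err)
	}
	fmt.Println(out.MemberAttributes.Name, out.RaftAttributes.PeerUrls)
}
```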
-func (m *RaftAttributes) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RaftAttributes) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RaftAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.IsLearner {
- i--
- if m.IsLearner {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if len(m.PeerUrls) > 0 {
- for iNdEx := len(m.PeerUrls) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PeerUrls[iNdEx])
- copy(dAtA[i:], m.PeerUrls[iNdEx])
- i = encodeVarintMembership(dAtA, i, uint64(len(m.PeerUrls[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Attributes) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Attributes) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Attributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.ClientUrls) > 0 {
- for iNdEx := len(m.ClientUrls) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ClientUrls[iNdEx])
- copy(dAtA[i:], m.ClientUrls[iNdEx])
- i = encodeVarintMembership(dAtA, i, uint64(len(m.ClientUrls[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintMembership(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Member) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Member) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.MemberAttributes != nil {
- {
- size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMembership(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.RaftAttributes != nil {
- {
- size, err := m.RaftAttributes.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMembership(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.ID != 0 {
- i = encodeVarintMembership(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ClusterVersionSetRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ClusterVersionSetRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ClusterVersionSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Ver) > 0 {
- i -= len(m.Ver)
- copy(dAtA[i:], m.Ver)
- i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ClusterMemberAttrSetRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ClusterMemberAttrSetRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ClusterMemberAttrSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.MemberAttributes != nil {
- {
- size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintMembership(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Member_ID != 0 {
- i = encodeVarintMembership(dAtA, i, uint64(m.Member_ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DowngradeInfoSetRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DowngradeInfoSetRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DowngradeInfoSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Ver) > 0 {
- i -= len(m.Ver)
- copy(dAtA[i:], m.Ver)
- i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver)))
- i--
- dAtA[i] = 0x12
- }
- if m.Enabled {
- i--
- if m.Enabled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintMembership(dAtA []byte, offset int, v uint64) int {
- offset -= sovMembership(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *RaftAttributes) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.PeerUrls) > 0 {
- for _, s := range m.PeerUrls {
- l = len(s)
- n += 1 + l + sovMembership(uint64(l))
- }
- }
- if m.IsLearner {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Attributes) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovMembership(uint64(l))
- }
- if len(m.ClientUrls) > 0 {
- for _, s := range m.ClientUrls {
- l = len(s)
- n += 1 + l + sovMembership(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Member) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovMembership(uint64(m.ID))
- }
- if m.RaftAttributes != nil {
- l = m.RaftAttributes.Size()
- n += 1 + l + sovMembership(uint64(l))
- }
- if m.MemberAttributes != nil {
- l = m.MemberAttributes.Size()
- n += 1 + l + sovMembership(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ClusterVersionSetRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Ver)
- if l > 0 {
- n += 1 + l + sovMembership(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ClusterMemberAttrSetRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Member_ID != 0 {
- n += 1 + sovMembership(uint64(m.Member_ID))
- }
- if m.MemberAttributes != nil {
- l = m.MemberAttributes.Size()
- n += 1 + l + sovMembership(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DowngradeInfoSetRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Enabled {
- n += 2
- }
- l = len(m.Ver)
- if l > 0 {
- n += 1 + l + sovMembership(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovMembership(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozMembership(x uint64) (n int) {
- return sovMembership(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
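
sovMembership computes the length of a value's base-128 varint encoding (one byte per 7-bit group, hence (bits+6)/7), and sozMembership applies zigzag encoding first so values that came from small negative integers stay short. A tiny, self-contained sketch of the same arithmetic:

```go
package main

import (
	"fmt"
	"math/bits"
)

// varintLen mirrors sovMembership: the number of 7-bit groups needed.
func varintLen(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// zigzag mirrors the transform sozMembership applies before sizing.
func zigzag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63))
}

func main() {
	fmt.Println(varintLen(1))          // 1 byte
	fmt.Println(varintLen(300))        // 2 bytes: 300 needs 9 bits, i.e. two 7-bit groups
	fmt.Println(varintLen(zigzag(-1))) // 1 byte: zigzag(-1) == 1
}
```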
-func (m *RaftAttributes) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RaftAttributes: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RaftAttributes: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerUrls", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerUrls = append(m.PeerUrls, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IsLearner = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipMembership(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMembership
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Attributes) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Attributes: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Attributes: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientUrls", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientUrls = append(m.ClientUrls, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMembership(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMembership
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Member) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Member: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftAttributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.RaftAttributes == nil {
- m.RaftAttributes = &RaftAttributes{}
- }
- if err := m.RaftAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.MemberAttributes == nil {
- m.MemberAttributes = &Attributes{}
- }
- if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMembership(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMembership
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ClusterVersionSetRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClusterVersionSetRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterVersionSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Ver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMembership(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMembership
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ClusterMemberAttrSetRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClusterMemberAttrSetRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterMemberAttrSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Member_ID", wireType)
- }
- m.Member_ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Member_ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.MemberAttributes == nil {
- m.MemberAttributes = &Attributes{}
- }
- if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMembership(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMembership
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DowngradeInfoSetRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DowngradeInfoSetRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DowngradeInfoSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Enabled = bool(v != 0)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthMembership
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthMembership
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Ver = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipMembership(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthMembership
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipMembership(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowMembership
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthMembership
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupMembership
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthMembership
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthMembership = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowMembership = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupMembership = fmt.Errorf("proto: unexpected end of group")
-)
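
The Unmarshal and skip helpers above all repeat the same base-128 varint loop. A minimal Go sketch of that loop in isolation, assuming a standalone decodeVarint helper (hypothetical, not part of the etcd tree):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one protobuf base-128 varint from b, returning the value
// and the number of bytes consumed. Each byte contributes its low 7 bits; a
// set high bit means another byte follows, mirroring the loops above.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // high bit clear: final byte of this varint
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 0xAC 0x02 encodes 300
	fmt.Println(v, n, err)                        // 300 2 <nil>
}
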
diff --git a/api/mvccpb/kv.pb.go b/api/mvccpb/kv.pb.go
deleted file mode 100644
index fc258d6c206..00000000000
--- a/api/mvccpb/kv.pb.go
+++ /dev/null
@@ -1,798 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: kv.proto
-
-package mvccpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type Event_EventType int32
-
-const (
- PUT Event_EventType = 0
- DELETE Event_EventType = 1
-)
-
-var Event_EventType_name = map[int32]string{
- 0: "PUT",
- 1: "DELETE",
-}
-
-var Event_EventType_value = map[string]int32{
- "PUT": 0,
- "DELETE": 1,
-}
-
-func (x Event_EventType) String() string {
- return proto.EnumName(Event_EventType_name, int32(x))
-}
-
-func (Event_EventType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_2216fe83c9c12408, []int{1, 0}
-}
-
-type KeyValue struct {
- // key is the key in bytes. An empty key is not allowed.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // create_revision is the revision of last creation on this key.
- CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
- // mod_revision is the revision of last modification on this key.
- ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
- // version is the version of the key. A deletion resets
- // the version to zero and any modification of the key
- // increases its version.
- Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
- // value is the value held by the key, in bytes.
- Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
- // lease is the ID of the lease that attached to key.
- // When the attached lease expires, the key will be deleted.
- // If lease is 0, then no lease is attached to the key.
- Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KeyValue) Reset() { *m = KeyValue{} }
-func (m *KeyValue) String() string { return proto.CompactTextString(m) }
-func (*KeyValue) ProtoMessage() {}
-func (*KeyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_2216fe83c9c12408, []int{0}
-}
-func (m *KeyValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KeyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KeyValue.Merge(m, src)
-}
-func (m *KeyValue) XXX_Size() int {
- return m.Size()
-}
-func (m *KeyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_KeyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValue proto.InternalMessageInfo
-
-type Event struct {
- // type is the kind of event. If type is a PUT, it indicates
- // new data has been stored to the key. If type is a DELETE,
- // it indicates the key was deleted.
- Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
- // kv holds the KeyValue for the event.
- // A PUT event contains current kv pair.
- // A PUT event with kv.Version=1 indicates the creation of a key.
- // A DELETE/EXPIRE event contains the deleted key with
- // its modification revision set to the revision of deletion.
- Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
- // prev_kv holds the key-value pair before the event happens.
- PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Event) Reset() { *m = Event{} }
-func (m *Event) String() string { return proto.CompactTextString(m) }
-func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_2216fe83c9c12408, []int{1}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Event.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func init() {
- proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
- proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
- proto.RegisterType((*Event)(nil), "mvccpb.Event")
-}
-
-func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
-
-var fileDescriptor_2216fe83c9c12408 = []byte{
- // 303 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
- 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
- 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
- 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
- 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
- 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
- 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
- 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
- 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
- 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
- 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
- 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
- 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
- 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
- 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
- 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
- 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
- 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
- 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
-}
-
-func (m *KeyValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Lease != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x30
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x2a
- }
- if m.Version != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.Version))
- i--
- dAtA[i] = 0x20
- }
- if m.ModRevision != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
- i--
- dAtA[i] = 0x18
- }
- if m.CreateRevision != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Event) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Event) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.PrevKv != nil {
- {
- size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintKv(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.Kv != nil {
- {
- size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintKv(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Type != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
- offset -= sovKv(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *KeyValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovKv(uint64(l))
- }
- if m.CreateRevision != 0 {
- n += 1 + sovKv(uint64(m.CreateRevision))
- }
- if m.ModRevision != 0 {
- n += 1 + sovKv(uint64(m.ModRevision))
- }
- if m.Version != 0 {
- n += 1 + sovKv(uint64(m.Version))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovKv(uint64(l))
- }
- if m.Lease != 0 {
- n += 1 + sovKv(uint64(m.Lease))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Event) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Type != 0 {
- n += 1 + sovKv(uint64(m.Type))
- }
- if m.Kv != nil {
- l = m.Kv.Size()
- n += 1 + l + sovKv(uint64(l))
- }
- if m.PrevKv != nil {
- l = m.PrevKv.Size()
- n += 1 + l + sovKv(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovKv(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozKv(x uint64) (n int) {
- return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *KeyValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
- }
- m.CreateRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CreateRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
- }
- m.ModRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ModRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- m.Version = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Version |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- m.Lease = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Lease |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipKv(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthKv
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Event) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Event: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= Event_EventType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Kv == nil {
- m.Kv = &KeyValue{}
- }
- if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.PrevKv == nil {
- m.PrevKv = &KeyValue{}
- }
- if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipKv(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthKv
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipKv(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowKv
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowKv
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowKv
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthKv
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupKv
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthKv
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group")
-)
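
The KeyValue and Event types above are what watch consumers receive; per the field comments, a PUT whose KeyValue has Version == 1 marks the creation of a key, while a DELETE carries the deleted key with ModRevision set to the deletion revision. A minimal sketch of that interpretation, assuming the published go.etcd.io/etcd/api/v3/mvccpb import path and a hypothetical classify helper:

package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/mvccpb"
)

// classify applies the interpretation documented on the Event and KeyValue
// fields above; it is an illustrative helper, not part of the etcd tree.
func classify(ev *mvccpb.Event) string {
	switch {
	case ev.Type == mvccpb.DELETE:
		return fmt.Sprintf("deleted at revision %d", ev.Kv.ModRevision)
	case ev.Kv.Version == 1:
		return "created"
	default:
		return "updated"
	}
}

func main() {
	ev := &mvccpb.Event{
		Type: mvccpb.PUT,
		Kv:   &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), Version: 1},
	}
	fmt.Println(classify(ev)) // created
}
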
diff --git a/api/v3rpc/rpctypes/error.go b/api/v3rpc/rpctypes/error.go
deleted file mode 100644
index 50a859282b3..00000000000
--- a/api/v3rpc/rpctypes/error.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-import (
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// server-side error
-var (
- ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
- ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
- ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
- ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
- ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
- ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
- ErrGRPCInvalidClientAPIVersion = status.New(codes.InvalidArgument, "etcdserver: invalid client api version").Err()
- ErrGRPCInvalidSortOption = status.New(codes.InvalidArgument, "etcdserver: invalid sort option").Err()
- ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
- ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
- ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
-
- ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
- ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
- ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
-
- ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err()
-
- ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
- ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
- ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
- ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
- ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err()
- ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err()
- ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err()
- ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err()
-
- ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
- ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
-
- ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
- ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
- ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
- ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
- ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
- ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
- ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
- ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err()
- ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
- ErrGRPCPermissionNotGiven = status.New(codes.InvalidArgument, "etcdserver: permission not given").Err()
- ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
- ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
- ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
- ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
- ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
- ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
- ErrGRPCAuthOldRevision = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err()
-
- ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
- ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
- ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
- ErrGRPCNotCapable = status.New(codes.FailedPrecondition, "etcdserver: not capable").Err()
- ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
- ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
- ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
- ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
- ErrGRPCTimeoutWaitAppliedIndex = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err()
- ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
- ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
- ErrGRPCNotSupportedForLearner = status.New(codes.FailedPrecondition, "etcdserver: rpc not supported for learner").Err()
- ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err()
-
- ErrGRPCWrongDowngradeVersionFormat = status.New(codes.InvalidArgument, "etcdserver: wrong downgrade target version format").Err()
- ErrGRPCInvalidDowngradeTargetVersion = status.New(codes.InvalidArgument, "etcdserver: invalid downgrade target version").Err()
- ErrGRPCClusterVersionUnavailable = status.New(codes.FailedPrecondition, "etcdserver: cluster version not found during downgrade").Err()
- ErrGRPCDowngradeInProcess = status.New(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress").Err()
- ErrGRPCNoInflightDowngrade = status.New(codes.FailedPrecondition, "etcdserver: no inflight downgrade job").Err()
-
- ErrGRPCCanceled = status.New(codes.Canceled, "etcdserver: request canceled").Err()
- ErrGRPCDeadlineExceeded = status.New(codes.DeadlineExceeded, "etcdserver: context deadline exceeded").Err()
-
- errStringToError = map[string]error{
- ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
- ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
- ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
- ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
-
- ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
- ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
- ErrorDesc(ErrGRPCInvalidSortOption): ErrGRPCInvalidSortOption,
- ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
- ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
- ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
-
- ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
- ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
- ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
-
- ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
- ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
- ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
- ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
- ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
- ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner,
- ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady,
- ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners,
-
- ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
- ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
-
- ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
- ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
- ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
- ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
- ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
- ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
- ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
- ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty,
- ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
- ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
- ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
- ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
- ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
- ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
- ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
- ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision,
-
- ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
- ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
- ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged,
- ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
- ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
- ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
- ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
- ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
- ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
- ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
- ErrorDesc(ErrGRPCNotSupportedForLearner): ErrGRPCNotSupportedForLearner,
- ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee,
-
- ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable,
- ErrorDesc(ErrGRPCWrongDowngradeVersionFormat): ErrGRPCWrongDowngradeVersionFormat,
- ErrorDesc(ErrGRPCInvalidDowngradeTargetVersion): ErrGRPCInvalidDowngradeTargetVersion,
- ErrorDesc(ErrGRPCDowngradeInProcess): ErrGRPCDowngradeInProcess,
- ErrorDesc(ErrGRPCNoInflightDowngrade): ErrGRPCNoInflightDowngrade,
- }
-)
-
-// client-side error
-var (
- ErrEmptyKey = Error(ErrGRPCEmptyKey)
- ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
- ErrValueProvided = Error(ErrGRPCValueProvided)
- ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
- ErrTooManyOps = Error(ErrGRPCTooManyOps)
- ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
- ErrInvalidSortOption = Error(ErrGRPCInvalidSortOption)
- ErrCompacted = Error(ErrGRPCCompacted)
- ErrFutureRev = Error(ErrGRPCFutureRev)
- ErrNoSpace = Error(ErrGRPCNoSpace)
-
- ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
- ErrLeaseExist = Error(ErrGRPCLeaseExist)
- ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
-
- ErrMemberExist = Error(ErrGRPCMemberExist)
- ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
- ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
- ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
- ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
- ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner)
- ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady)
- ErrTooManyLearners = Error(ErrGRPCTooManyLearners)
-
- ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
- ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
-
- ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
- ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
- ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
- ErrUserEmpty = Error(ErrGRPCUserEmpty)
- ErrUserNotFound = Error(ErrGRPCUserNotFound)
- ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
- ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
- ErrRoleEmpty = Error(ErrGRPCRoleEmpty)
- ErrAuthFailed = Error(ErrGRPCAuthFailed)
- ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
- ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
- ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
- ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
- ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
- ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision)
- ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
-
- ErrNoLeader = Error(ErrGRPCNoLeader)
- ErrNotLeader = Error(ErrGRPCNotLeader)
- ErrLeaderChanged = Error(ErrGRPCLeaderChanged)
- ErrNotCapable = Error(ErrGRPCNotCapable)
- ErrStopped = Error(ErrGRPCStopped)
- ErrTimeout = Error(ErrGRPCTimeout)
- ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
- ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
- ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex)
- ErrUnhealthy = Error(ErrGRPCUnhealthy)
- ErrCorrupt = Error(ErrGRPCCorrupt)
- ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee)
-
- ErrClusterVersionUnavailable = Error(ErrGRPCClusterVersionUnavailable)
- ErrWrongDowngradeVersionFormat = Error(ErrGRPCWrongDowngradeVersionFormat)
- ErrInvalidDowngradeTargetVersion = Error(ErrGRPCInvalidDowngradeTargetVersion)
- ErrDowngradeInProcess = Error(ErrGRPCDowngradeInProcess)
- ErrNoInflightDowngrade = Error(ErrGRPCNoInflightDowngrade)
-)
-
-// EtcdError defines gRPC server errors.
-// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
-type EtcdError struct {
- code codes.Code
- desc string
-}
-
-// Code returns grpc/codes.Code.
-// TODO: define clientv3/codes.Code.
-func (e EtcdError) Code() codes.Code {
- return e.code
-}
-
-func (e EtcdError) Error() string {
- return e.desc
-}
-
-func Error(err error) error {
- if err == nil {
- return nil
- }
- verr, ok := errStringToError[ErrorDesc(err)]
- if !ok { // not gRPC error
- return err
- }
- ev, ok := status.FromError(verr)
- var desc string
- if ok {
- desc = ev.Message()
- } else {
- desc = verr.Error()
- }
- return EtcdError{code: ev.Code(), desc: desc}
-}
-
-func ErrorDesc(err error) string {
- if s, ok := status.FromError(err); ok {
- return s.Message()
- }
- return err.Error()
-}
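
The errStringToError table and the Error helper above exist so that clients can turn a raw gRPC status back into one of the shared error values and compare it directly. A minimal sketch of that round trip, assuming the go.etcd.io/etcd/api/v3/v3rpc/rpctypes import path; the simulated server error is constructed by hand for illustration:

package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// A raw gRPC error as a server would return it for a compacted revision.
	raw := status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()

	// rpctypes.Error looks the message up in errStringToError and wraps it in
	// an EtcdError, so it compares equal to the shared client-side value.
	err := rpctypes.Error(raw)
	fmt.Println(err == rpctypes.ErrCompacted)                        // true
	fmt.Println(err.(rpctypes.EtcdError).Code() == codes.OutOfRange) // true
}
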
diff --git a/api/v3rpc/rpctypes/error_test.go b/api/v3rpc/rpctypes/error_test.go
deleted file mode 100644
index 525d9698311..00000000000
--- a/api/v3rpc/rpctypes/error_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-import (
- "testing"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-func TestConvert(t *testing.T) {
- e1 := status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
- e2 := ErrGRPCEmptyKey
- e3 := ErrEmptyKey
-
- if e1.Error() != e2.Error() {
- t.Fatalf("expected %q == %q", e1.Error(), e2.Error())
- }
- if ev1, ok := status.FromError(e1); ok && ev1.Code() != e3.(EtcdError).Code() {
- t.Fatalf("expected them to be equal, got %v / %v", ev1.Code(), e3.(EtcdError).Code())
- }
-
- if e1.Error() == e3.Error() {
- t.Fatalf("expected %q != %q", e1.Error(), e3.Error())
- }
- if ev2, ok := status.FromError(e2); ok && ev2.Code() != e3.(EtcdError).Code() {
- t.Fatalf("expected them to be equal, got %v / %v", ev2.Code(), e3.(EtcdError).Code())
- }
-}
diff --git a/api/version/version.go b/api/version/version.go
deleted file mode 100644
index bd39791a929..00000000000
--- a/api/version/version.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package version implements etcd version parsing and contains latest version
-// information.
-package version
-
-import (
- "fmt"
- "strings"
-
- "github.com/coreos/go-semver/semver"
-)
-
-var (
- // MinClusterVersion is the min cluster version this etcd binary is compatible with.
- MinClusterVersion = "3.0.0"
- Version = "3.6.0-alpha.0"
- APIVersion = "unknown"
-
- // Git SHA Value will be set during build
- GitSHA = "Not provided (use ./build instead of go build)"
-)
-
-// Get all constant versions defined in a centralized place.
-var (
- V3_0 = semver.Version{Major: 3, Minor: 0}
- V3_1 = semver.Version{Major: 3, Minor: 1}
- V3_2 = semver.Version{Major: 3, Minor: 2}
- V3_3 = semver.Version{Major: 3, Minor: 3}
- V3_4 = semver.Version{Major: 3, Minor: 4}
- V3_5 = semver.Version{Major: 3, Minor: 5}
- V3_6 = semver.Version{Major: 3, Minor: 6}
- V3_7 = semver.Version{Major: 3, Minor: 7}
- V4_0 = semver.Version{Major: 4, Minor: 0}
-)
-
-func init() {
- ver, err := semver.NewVersion(Version)
- if err == nil {
- APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
- }
-}
-
-type Versions struct {
- Server string `json:"etcdserver"`
- Cluster string `json:"etcdcluster"`
- Storage string `json:"storage"`
- // TODO: raft state machine version
-}
-
-// Cluster only keeps the major.minor.
-func Cluster(v string) string {
- vs := strings.Split(v, ".")
- if len(vs) <= 2 {
- return v
- }
- return fmt.Sprintf("%s.%s", vs[0], vs[1])
-}
-
-func Compare(ver1, ver2 semver.Version) int {
- return ver1.Compare(ver2)
-}
-
-func LessThan(ver1, ver2 semver.Version) bool {
- return ver1.LessThan(ver2)
-}
-
-func Equal(ver1, ver2 semver.Version) bool {
- return ver1.Equal(ver2)
-}
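
Cluster, Compare, LessThan, and Equal above are thin helpers over go-semver; Cluster only keeps the major.minor part of a version string. A minimal sketch of their behavior, assuming the go.etcd.io/etcd/api/v3/version import path:

package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/version"
)

func main() {
	fmt.Println(version.Cluster("3.6.0-alpha.0")) // 3.6
	fmt.Println(version.Cluster("3.6"))           // 3.6 (already major.minor)

	fmt.Println(version.Compare(version.V3_6, version.V3_5))  // 1
	fmt.Println(version.LessThan(version.V3_5, version.V3_6)) // true
	fmt.Println(version.Equal(version.V3_5, version.V3_5))    // true
}
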
diff --git a/api/version/version_test.go b/api/version/version_test.go
deleted file mode 100644
index 532e7525a21..00000000000
--- a/api/version/version_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package version
-
-import (
- "testing"
-
- "github.com/coreos/go-semver/semver"
- "github.com/stretchr/testify/assert"
-)
-
-func TestVersionCompare(t *testing.T) {
- cases := []struct {
- name string
- ver1 semver.Version
- ver2 semver.Version
- expectedCompareResult int
- expectedLessThanResult bool
- expectedEqualResult bool
- }{
- {
-			name:                   "ver1 should be greater than ver2",
- ver1: V3_5,
- ver2: V3_4,
- expectedCompareResult: 1,
- expectedLessThanResult: false,
- expectedEqualResult: false,
- },
- {
-			name:                   "ver1(4.0) should be greater than ver2",
- ver1: V4_0,
- ver2: V3_7,
- expectedCompareResult: 1,
- expectedLessThanResult: false,
- expectedEqualResult: false,
- },
- {
- name: "ver1 should be less than ver2",
- ver1: V3_5,
- ver2: V3_6,
- expectedCompareResult: -1,
- expectedLessThanResult: true,
- expectedEqualResult: false,
- },
- {
- name: "ver1 should be less than ver2 (4.0)",
- ver1: V3_5,
- ver2: V4_0,
- expectedCompareResult: -1,
- expectedLessThanResult: true,
- expectedEqualResult: false,
- },
- {
- name: "ver1 should be equal to ver2",
- ver1: V3_5,
- ver2: V3_5,
- expectedCompareResult: 0,
- expectedLessThanResult: false,
- expectedEqualResult: true,
- },
- }
- for _, tc := range cases {
- t.Run(tc.name, func(t *testing.T) {
- compareResult := Compare(tc.ver1, tc.ver2)
- lessThanResult := LessThan(tc.ver1, tc.ver2)
- equalResult := Equal(tc.ver1, tc.ver2)
-
- assert.Equal(t, tc.expectedCompareResult, compareResult)
- assert.Equal(t, tc.expectedLessThanResult, lessThanResult)
- assert.Equal(t, tc.expectedEqualResult, equalResult)
- })
- }
-}
diff --git a/api/versionpb/version.pb.go b/api/versionpb/version.pb.go
deleted file mode 100644
index 8e5ce7ec2a6..00000000000
--- a/api/versionpb/version.pb.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: version.proto
-
-package versionpb
-
-import (
- fmt "fmt"
- math "math"
-
- _ "github.com/gogo/protobuf/gogoproto"
- protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-var E_EtcdVersionMsg = &proto.ExtensionDesc{
- ExtendedType: (*protobuf.MessageOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 50000,
- Name: "versionpb.etcd_version_msg",
- Tag: "bytes,50000,opt,name=etcd_version_msg",
- Filename: "version.proto",
-}
-
-var E_EtcdVersionField = &proto.ExtensionDesc{
- ExtendedType: (*protobuf.FieldOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 50001,
- Name: "versionpb.etcd_version_field",
- Tag: "bytes,50001,opt,name=etcd_version_field",
- Filename: "version.proto",
-}
-
-var E_EtcdVersionEnum = &proto.ExtensionDesc{
- ExtendedType: (*protobuf.EnumOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 50002,
- Name: "versionpb.etcd_version_enum",
- Tag: "bytes,50002,opt,name=etcd_version_enum",
- Filename: "version.proto",
-}
-
-var E_EtcdVersionEnumValue = &proto.ExtensionDesc{
- ExtendedType: (*protobuf.EnumValueOptions)(nil),
- ExtensionType: (*string)(nil),
- Field: 50003,
- Name: "versionpb.etcd_version_enum_value",
- Tag: "bytes,50003,opt,name=etcd_version_enum_value",
- Filename: "version.proto",
-}
-
-func init() {
- proto.RegisterExtension(E_EtcdVersionMsg)
- proto.RegisterExtension(E_EtcdVersionField)
- proto.RegisterExtension(E_EtcdVersionEnum)
- proto.RegisterExtension(E_EtcdVersionEnumValue)
-}
-
-func init() { proto.RegisterFile("version.proto", fileDescriptor_7d2c07d79758f814) }
-
-var fileDescriptor_7d2c07d79758f814 = []byte{
- // 261 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4b, 0x2d, 0x2a,
- 0xce, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x72, 0x0b, 0x92, 0xa4,
- 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xa2, 0xfa, 0x20, 0x16, 0x44, 0x81, 0x94, 0x42, 0x7a, 0x7e,
- 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x92, 0x5a, 0x9c, 0x5c, 0x94,
- 0x59, 0x50, 0x92, 0x5f, 0x04, 0x51, 0x61, 0xe5, 0xc7, 0x25, 0x90, 0x5a, 0x92, 0x9c, 0x12, 0x0f,
- 0x35, 0x29, 0x3e, 0xb7, 0x38, 0x5d, 0x48, 0x5e, 0x0f, 0xa2, 0x4d, 0x0f, 0xa6, 0x4d, 0xcf, 0x37,
- 0xb5, 0xb8, 0x38, 0x31, 0x3d, 0xd5, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0xe2, 0x42, 0x1b,
- 0xb3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x1f, 0x48, 0x6b, 0x18, 0x44, 0xa7, 0x6f, 0x71, 0x7a, 0x07,
- 0x23, 0xa3, 0x55, 0x00, 0x97, 0x10, 0x8a, 0x79, 0x69, 0x99, 0xa9, 0x39, 0x29, 0x42, 0xb2, 0x18,
- 0x26, 0xba, 0x81, 0xc4, 0x61, 0xe6, 0x5d, 0x84, 0x9a, 0x27, 0x80, 0x64, 0x1e, 0x58, 0x01, 0xc8,
- 0x44, 0x5f, 0x2e, 0x41, 0x14, 0x13, 0x53, 0xf3, 0x4a, 0x73, 0x85, 0x64, 0x30, 0x0c, 0x74, 0xcd,
- 0x2b, 0xcd, 0x85, 0x99, 0x77, 0x09, 0x6a, 0x1e, 0x3f, 0x92, 0x79, 0x20, 0x79, 0x90, 0x71, 0xb1,
- 0x5c, 0xe2, 0x18, 0xc6, 0xc5, 0x97, 0x25, 0xe6, 0x94, 0xa6, 0x0a, 0x29, 0x62, 0x35, 0x34, 0x0c,
- 0x24, 0x07, 0x33, 0xf9, 0x32, 0xd4, 0x64, 0x11, 0x34, 0x93, 0xc1, 0x8a, 0x3a, 0x18, 0x19, 0x9d,
- 0x04, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f,
- 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0xa6, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x77, 0x44, 0xe2,
- 0xa4, 0xbc, 0x01, 0x00, 0x00,
-}
diff --git a/api/versionpb/version.proto b/api/versionpb/version.proto
deleted file mode 100644
index 27cfb5d40c4..00000000000
--- a/api/versionpb/version.proto
+++ /dev/null
@@ -1,28 +0,0 @@
-syntax = "proto3";
-package versionpb;
-
-import "gogoproto/gogo.proto";
-import "google/protobuf/descriptor.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-// Indicates etcd version that introduced the message, used to determine minimal etcd version required to interpret wal that includes this message.
-extend google.protobuf.MessageOptions {
- optional string etcd_version_msg = 50000;
-}
-
-// Indicates etcd version that introduced the field, used to determine minimal etcd version required to interpret wal that sets this field.
-extend google.protobuf.FieldOptions {
- optional string etcd_version_field = 50001;
-}
-
-// Indicates etcd version that introduced the enum, used to determine minimal etcd version required to interpret wal that uses this enum.
-extend google.protobuf.EnumOptions {
- optional string etcd_version_enum = 50002;
-}
-
-// Indicates etcd version that introduced the enum value, used to determine minimal etcd version required to interpret wal that sets this enum value.
-extend google.protobuf.EnumValueOptions {
- optional string etcd_version_enum_value = 50003;
-}
diff --git a/bill-of-materials.json b/bill-of-materials.json
deleted file mode 100644
index 9638e8e54aa..00000000000
--- a/bill-of-materials.json
+++ /dev/null
@@ -1,762 +0,0 @@
-[
- {
- "project": "github.com/VividCortex/ewma",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/anishathalye/porcupine",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/benbjohnson/clock",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/beorn7/perks/quantile",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/bgentry/speakeasy",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9441624365482234
- }
- ]
- },
- {
- "project": "github.com/cenkalti/backoff/v4",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/cespare/xxhash/v2",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/cheggaaa/pb/v3",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9916666666666667
- }
- ]
- },
- {
- "project": "github.com/coreos/go-semver/semver",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/coreos/go-systemd/v22",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 0.9966703662597114
- }
- ]
- },
- {
- "project": "github.com/creack/pty",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/davecgh/go-spew/spew",
- "licenses": [
- {
- "type": "ISC License",
- "confidence": 0.9850746268656716
- }
- ]
- },
- {
- "project": "github.com/dustin/go-humanize",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.96875
- }
- ]
- },
- {
- "project": "github.com/fatih/color",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/go-logr/logr",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/go-logr/stdr",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/gogo/protobuf",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9163346613545816
- }
- ]
- },
- {
- "project": "github.com/golang-jwt/jwt/v4",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/golang/groupcache/lru",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 0.9966703662597114
- }
- ]
- },
- {
- "project": "github.com/golang/protobuf",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "github.com/google/btree",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/google/go-cmp/cmp",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "github.com/gorilla/websocket",
- "licenses": [
- {
- "type": "BSD 2-clause \"Simplified\" License",
- "confidence": 0.9852216748768473
- }
- ]
- },
- {
- "project": "github.com/grpc-ecosystem/go-grpc-middleware",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/grpc-ecosystem/go-grpc-prometheus",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/grpc-ecosystem/grpc-gateway",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.979253112033195
- }
- ]
- },
- {
- "project": "github.com/grpc-ecosystem/grpc-gateway/v2",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.979253112033195
- }
- ]
- },
- {
- "project": "github.com/inconshreveable/mousetrap",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/jonboulle/clockwork",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/mattn/go-colorable",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/mattn/go-isatty",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9587628865979382
- }
- ]
- },
- {
- "project": "github.com/mattn/go-runewidth",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/matttproud/golang_protobuf_extensions/pbutil",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/olekukonko/tablewriter",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/pmezard/go-difflib/difflib",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9830508474576272
- }
- ]
- },
- {
- "project": "github.com/prometheus/client_golang/prometheus",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/prometheus/client_model/go",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/prometheus/common",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/prometheus/procfs",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/rivo/uniseg",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/sirupsen/logrus",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/soheilhy/cmux",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/spf13/cobra",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 0.9573241061130334
- }
- ]
- },
- {
- "project": "github.com/spf13/pflag",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "github.com/stretchr/testify/assert",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/tmc/grpc-websocket-proxy/wsproxy",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/xiang90/probing",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/bbolt",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/api/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/client/pkg/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/client/v2",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/client/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/etcdctl/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/etcdutl/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/pkg/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/server/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/tests/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/etcd/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.etcd.io/raft/v3",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/otel",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/otel/exporters/otlp/internal/retry",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/otel/metric",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/otel/sdk",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/otel/trace",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.opentelemetry.io/proto/otlp",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.uber.org/atomic",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "go.uber.org/multierr",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "go.uber.org/zap",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "golang.org/x/crypto",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/net",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/sys/unix",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/text",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/time/rate",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "google.golang.org/genproto",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "google.golang.org/grpc",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "google.golang.org/protobuf",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "gopkg.in/natefinch/lumberjack.v2",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "gopkg.in/yaml.v2",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- },
- {
- "type": "MIT License",
- "confidence": 0.8975609756097561
- }
- ]
- },
- {
- "project": "gopkg.in/yaml.v3",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.7469879518072289
- }
- ]
- },
- {
- "project": "sigs.k8s.io/json",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 0.9617021276595744
- }
- ]
- },
- {
- "project": "sigs.k8s.io/yaml",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 1
- }
- ]
- }
-]
diff --git a/bill-of-materials.override.json b/bill-of-materials.override.json
deleted file mode 100644
index 15afc56409d..00000000000
--- a/bill-of-materials.override.json
+++ /dev/null
@@ -1,18 +0,0 @@
-[
- {
- "project": "sigs.k8s.io/yaml",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License"
- }
- ]
- },
- {
- "project": "github.com/inconshreveable/mousetrap",
- "licenses": [
- {
- "type": "Apache License 2.0"
- }
- ]
- }
-]
diff --git a/cert/ca.crt b/cert/ca.crt
new file mode 100644
index 00000000000..d18562e5467
--- /dev/null
+++ b/cert/ca.crt
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC4zCCAcugAwIBAgIBADANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDEwdldGNk
+LWNhMCAXDTIyMDMxMTAyMzY0M1oYDzIxMjIwMjE1MDIzNjQzWjASMRAwDgYDVQQD
+EwdldGNkLWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtUMEfdwu
+s4r9zw8LZDQWiAWzW/v8TAQ2GsO7s8MRtmcUbNL4aC26cwwlJhCkvY99UxyAjoUR
+izWW0g3mP8AiLZgZP+SzsrYClrQI86OnmiNK8sHHU4mfasIYW1WXU3YRslyBomdg
+a9Ytt+d3MoJNLi0Xg5pd3d4kyEWjhwCIX3QE5xGkME6MiEu6hrz7i25YaK2NsK9Y
+oTwGm3TXhWc9Y7WJK0Y7+W6f5oodZPXCYzQnEYTIxZt8TtqWUgG7ybn8v0gBKPvm
+yMHiSFkmsgfu8Gm3E1e4/dARAxXkoOoIKaDX1uLn6VXQ73zyeHSWkuyntzeHwF8K
+CnPq75gf3NibUQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQU5/jC6+FWQA7i+pdzUI0ES5nQ2uUwDQYJKoZIhvcNAQEL
+BQADggEBAAdN0ptUClmsRx2/MQsMRrQngJBzKWA0HIc72JiBtKblUffbYIso3u6E
+pJERmIsnhKhmzLB9WTZ3Nc18k/+AoFzDYF/7nSMFwEII6ei27WUH4zEeg8zwTv6h
+aSHUrVHZW5hoovT2JqI6wxsuLuUVHZqbRdA+55A5xGmpl8ASdvSklTL4iV+eS/Ly
+nAZeIYI3WBLJF56SigyIncw+dbbQtqLk+F4sAGsW9PwoeAHgT4c6WaL1ODLKtZsW
+YAFs4FENADaLobHXT3PPgCBOU30mBU2JiwkYiu50GXd23ukJpvyRNCJe57q377qw
+nZrEOoRxco8iql+SfevQrxSEXbywzQU=
+-----END CERTIFICATE-----
diff --git a/cert/server.crt b/cert/server.crt
new file mode 100644
index 00000000000..f9e5efae4bb
--- /dev/null
+++ b/cert/server.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDQzCCAiugAwIBAgIILzYehk9swfcwDQYJKoZIhvcNAQELBQAwEjEQMA4GA1UE
+AxMHZXRjZC1jYTAgFw0yMjAzMTEwMjM2NDNaGA8yMTIyMDIxNTAyMzY0NFowFzEV
+MBMGA1UEAxMMazhzLW1hc3RlcjAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAtVP1gFmU8Ojcn+dL+1YVxPMshM4MS14LcJCnRx2BJye/1CFJrN6gjllW
+ljebFxrhF4HebOlknHgVI9LDZngtLkNGE4QiVW2mRnQPahEZhZE1t6kAQt6KeOxb
+exG7tj2/dUY+w8Skk6BfT4jD/RXu64deX09MkwjMyVCalLOZ5ESAy3W9Iw13qVhT
+aIbDZ35SR7Rah44wu8ZAtL18qvGSKhAtelX21R4ywij2p9AsI/TK4Js3HrDTsu96
+UjUpSuBmp2veYu/ju6H7YF1qmsXPsJu6u+dMF5e2T8sQj2vT9FoOb7Ifde+nkRBo
+wQ7mZWOaoJsn/89+RMmOvChRigSiFQIDAQABo4GVMIGSMA4GA1UdDwEB/wQEAwIF
+oDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHwYDVR0jBBgwFoAU5/jC
+6+FWQA7i+pdzUI0ES5nQ2uUwQAYDVR0RBDkwN4IJbG9jYWxob3N0ggxrOHMtbWFz
+dGVyMDGHBH8AAAGHBAoKCnmHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQEL
+BQADggEBAIZzw5TgvYWx5wMacdsT00wxa4bWu6OBpc+wEmNRAxl0za9krcE3Xnxl
+pxSnoLLW3F2KF5shLnsZ3cQh1GlzVeuo3pWIgKcXEZ+98SJC3lh04AF9JTJqDJTC
+tM8LG8aF3PD+Y/r/5xihGKLnbfkdbI2rBdirntQJ0+P1ZoUbdpSbq8g+5IYQ1Va1
+sZKy04pKDv4wQ7EtXknF8Cz3EwkYv6imO+mfW6bf/VDV6LuVp/IQhapSwx4tgYao
+QXUvnc9qlw4enDLMdqx+fldUdSAUNa+ye3+yvqD2hbgjeXfkm1o1FpT42dj9TxI3
+7TyJ89HsaL+z2QagHcmWD4O5LbX4+ik=
+-----END CERTIFICATE-----
diff --git a/cert/server.key b/cert/server.key
new file mode 100644
index 00000000000..b5519b68ba5
--- /dev/null
+++ b/cert/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAtVP1gFmU8Ojcn+dL+1YVxPMshM4MS14LcJCnRx2BJye/1CFJ
+rN6gjllWljebFxrhF4HebOlknHgVI9LDZngtLkNGE4QiVW2mRnQPahEZhZE1t6kA
+Qt6KeOxbexG7tj2/dUY+w8Skk6BfT4jD/RXu64deX09MkwjMyVCalLOZ5ESAy3W9
+Iw13qVhTaIbDZ35SR7Rah44wu8ZAtL18qvGSKhAtelX21R4ywij2p9AsI/TK4Js3
+HrDTsu96UjUpSuBmp2veYu/ju6H7YF1qmsXPsJu6u+dMF5e2T8sQj2vT9FoOb7If
+de+nkRBowQ7mZWOaoJsn/89+RMmOvChRigSiFQIDAQABAoIBAAawrFQp+fbRgKJd
+tE33pSH5HuFfgNCfDaj/jUxwaD17l/ZJrCA2rpHR9gHg11YI5dkqL3yxN8cWHyGN
+OyxirrgQP1uk+mdQflwHGDJ/owNskiDOmXXeJBnGDrIBu31D1faLuvEaJkBUIHAc
+ya5iysfh3LeDg33BS1z6HlnyLnFfja9x+qUcrJUOFg5c/jbd6t3Khc/vqninpYJo
+sCiwyXqtUbJqX+sIxvveFzyweUr0ywrfdiuihptW72it6bW4Q5uZcr9Gn5bMU9PK
+cmldrhbAi8ixrTuQTrZjijojrRY6OEwI7jrpRcXZ7t3s2G0suw0PoVI46ZN3vz7R
+vRaT5IECgYEAw2KfSdP0kM/dfxY3zvOBIwwCzrXXn+ua7iaCTXzTRv/G0V4qqW+7
+m2DB9IM33pqj1AWudS0vEVN7TONxagK/s3o7172KrWk7AKss/GeyFK25Kx5fYWIF
+pRsftXoFHjvdiOmO6GGBaMqLb0P2lngCWoaBHlmr9uzAQY0WPlLek8UCgYEA7ZTq
+joIzMMb8uSuo4q5qr16yOTChWRcXZqpfVSX4y/aJRGwzhmsHU8P2PjItX5hXURgQ
+2wZlXB053kfgFHE1hapt0aIS9qXWtu/QRtWwDmSk4ulkkCmn2QT//RpIFrXOn4qq
+fgf9hjnzpCuaf5FcJQVHT4pgG/LkzknbqpRsqhECgYEAjV04tJjnVTMgJgg0PsbN
+w0a4bUkCBpHX2cEA/AF5d+AtwGPqaAcQbP4XtsqNzMCEEi4+KEeVy2pkRqA0+aed
+fcTNsW1Q/eCqMPSoqsJ4BSAgXkMubW4XeXrjeVEcjOBxi9K4dAfAMsqBEfLRYdLY
+mRjCKOxmUFTBUWw8EMGyiqUCgYEAq5QaeWTqV1W9+nTfeSYBgjlfeRH31IFqswhj
+5PiRX6vionmKFI+DMSma0nwmbJ12oehBdAyAcy/gNPmviNPhlXDp8rWcAGjwUhmL
+TzzP8vUYZ4+qwrpyr7Z+sWmjmlMer/XS/0YCAEgl/vBGmc5+v3W6dGU417ZpK9oH
+PIAIoiECgYAkhCGjGwRs3jLMxFCDoDyoXWR4kkTWuXhDszaoJsN0SJ0LJ4xOzsv4
+Ko7riFgzoHZ3nKOPCLQY/Rpv5J0wm1jErerKGtIgWGoG8iE/iVs6N9yv1iNHewDy
+TSfmF7oeGXDSd4XdmSHOmM1jd+xfsvrYQC9LugEYc6M1mVB+MMyBrQ==
+-----END RSA PRIVATE KEY-----
diff --git a/client/pkg/LICENSE b/client/pkg/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/client/pkg/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/client/pkg/fileutil/filereader.go b/client/pkg/fileutil/filereader.go
deleted file mode 100644
index 55248888c60..00000000000
--- a/client/pkg/fileutil/filereader.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "bufio"
- "io"
- "io/fs"
- "os"
-)
-
-// FileReader is a wrapper of io.Reader. It also provides file info.
-type FileReader interface {
- io.Reader
- FileInfo() (fs.FileInfo, error)
-}
-
-type fileReader struct {
- *os.File
-}
-
-func NewFileReader(f *os.File) FileReader {
- return &fileReader{f}
-}
-
-func (fr *fileReader) FileInfo() (fs.FileInfo, error) {
- return fr.Stat()
-}
-
-// FileBufReader is a wrapper of bufio.Reader. It also provides file info.
-type FileBufReader struct {
- *bufio.Reader
- fi fs.FileInfo
-}
-
-func NewFileBufReader(fr FileReader) *FileBufReader {
- bufReader := bufio.NewReader(fr)
- fi, err := fr.FileInfo()
- if err != nil {
- // This should never happen.
- panic(err)
- }
- return &FileBufReader{bufReader, fi}
-}
-
-func (fbr *FileBufReader) FileInfo() fs.FileInfo {
- return fbr.fi
-}
diff --git a/client/pkg/fileutil/filereader_test.go b/client/pkg/fileutil/filereader_test.go
deleted file mode 100644
index 2f863cdcef5..00000000000
--- a/client/pkg/fileutil/filereader_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "os"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestFileBufReader(t *testing.T) {
- f, err := os.CreateTemp(t.TempDir(), "wal")
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- fi, err := f.Stat()
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
-
- fbr := NewFileBufReader(NewFileReader(f))
-
- if !strings.HasPrefix(fbr.FileInfo().Name(), "wal") {
- t.Errorf("Unexpected file name: %s", fbr.FileInfo().Name())
- }
- assert.Equal(t, fi.Size(), fbr.FileInfo().Size())
- assert.Equal(t, fi.IsDir(), fbr.FileInfo().IsDir())
- assert.Equal(t, fi.Mode(), fbr.FileInfo().Mode())
- assert.Equal(t, fi.ModTime(), fbr.FileInfo().ModTime())
-}
diff --git a/client/pkg/fileutil/fileutil.go b/client/pkg/fileutil/fileutil.go
deleted file mode 100644
index 3bedee7d2b3..00000000000
--- a/client/pkg/fileutil/fileutil.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "fmt"
- "io"
- "io/fs"
- "os"
- "path/filepath"
-
- "go.uber.org/zap"
-
- "go.etcd.io/etcd/client/pkg/v3/verify"
-)
-
-const (
- // PrivateFileMode grants owner to read/write a file.
- PrivateFileMode = 0600
-)
-
-// IsDirWriteable checks if dir is writable by writing and removing a file
-// to dir. It returns nil if dir is writable.
-func IsDirWriteable(dir string) error {
- f, err := filepath.Abs(filepath.Join(dir, ".touch"))
- if err != nil {
- return err
- }
- if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
- return err
- }
- return os.Remove(f)
-}
-
-// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
-// does not exist. TouchDirAll also ensures the given directory is writable.
-func TouchDirAll(lg *zap.Logger, dir string) error {
- verify.Assert(lg != nil, "nil log isn't allowed")
- // If path is already a directory, MkdirAll does nothing and returns nil, so,
- // first check if dir exists with an expected permission mode.
- if Exist(dir) {
- err := CheckDirPermission(dir, PrivateDirMode)
- if err != nil {
- lg.Warn("check file permission", zap.Error(err))
- }
- } else {
- err := os.MkdirAll(dir, PrivateDirMode)
- if err != nil {
- // if mkdirAll("a/text") and "text" is not
- // a directory, this will return syscall.ENOTDIR
- return err
- }
- }
-
- return IsDirWriteable(dir)
-}
-
-// CreateDirAll is similar to TouchDirAll but returns error
-// if the deepest directory was not empty.
-func CreateDirAll(lg *zap.Logger, dir string) error {
- err := TouchDirAll(lg, dir)
- if err == nil {
- var ns []string
- ns, err = ReadDir(dir)
- if err != nil {
- return err
- }
- if len(ns) != 0 {
- err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
- }
- }
- return err
-}
-
-// Exist returns true if a file or directory exists.
-func Exist(name string) bool {
- _, err := os.Stat(name)
- return err == nil
-}
-
-// DirEmpty returns true if a directory is empty and accessible.
-func DirEmpty(name string) bool {
- ns, err := ReadDir(name)
- return len(ns) == 0 && err == nil
-}
-
-// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
-// shorten the length of the file.
-func ZeroToEnd(f *os.File) error {
- // TODO: support FALLOC_FL_ZERO_RANGE
- off, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- lenf, lerr := f.Seek(0, io.SeekEnd)
- if lerr != nil {
- return lerr
- }
- if err = f.Truncate(off); err != nil {
- return err
- }
- // make sure blocks remain allocated
- if err = Preallocate(f, lenf, true); err != nil {
- return err
- }
- _, err = f.Seek(off, io.SeekStart)
- return err
-}
-
-// CheckDirPermission checks permission on an existing dir.
-// Returns an error if the dir does not exist or exists with a different permission than specified.
-func CheckDirPermission(dir string, perm os.FileMode) error {
- if !Exist(dir) {
- return fmt.Errorf("directory %q empty, cannot check permission", dir)
- }
- //check the existing permission on the directory
- dirInfo, err := os.Stat(dir)
- if err != nil {
- return err
- }
- dirMode := dirInfo.Mode().Perm()
- if dirMode != perm {
- err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode))
- return err
- }
- return nil
-}
-
-// RemoveMatchFile deletes files in an existing dir for which matchFunc returns true.
-// Returns an error if the dir does not exist or removing a file fails.
-func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) bool) error {
- if lg == nil {
- lg = zap.NewNop()
- }
- if !Exist(dir) {
- return fmt.Errorf("directory %s does not exist", dir)
- }
- fileNames, err := ReadDir(dir)
- if err != nil {
- return err
- }
- var removeFailedFiles []string
- for _, fileName := range fileNames {
- if matchFunc(fileName) {
- file := filepath.Join(dir, fileName)
- if err = os.Remove(file); err != nil {
- removeFailedFiles = append(removeFailedFiles, fileName)
- lg.Error("remove file failed",
- zap.String("file", file),
- zap.Error(err))
- continue
- }
- }
- }
- if len(removeFailedFiles) != 0 {
- return fmt.Errorf("remove file(s) %v error", removeFailedFiles)
- }
- return nil
-}
-
-// ListFiles lists files in an existing dir for which matchFunc returns true.
-// Returns an error if the dir does not exist.
-func ListFiles(dir string, matchFunc func(fileName string) bool) ([]string, error) {
- var files []string
- err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {
- if matchFunc(path) {
- files = append(files, path)
- }
- return nil
- })
- return files, err
-}
diff --git a/client/pkg/fileutil/fileutil_test.go b/client/pkg/fileutil/fileutil_test.go
deleted file mode 100644
index f6b22e55de6..00000000000
--- a/client/pkg/fileutil/fileutil_test.go
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "fmt"
- "io"
- "math/rand"
- "os"
- "os/user"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "go.uber.org/zap/zaptest"
-)
-
-func TestIsDirWriteable(t *testing.T) {
- tmpdir := t.TempDir()
- if err := IsDirWriteable(tmpdir); err != nil {
- t.Fatalf("unexpected IsDirWriteable error: %v", err)
- }
- if err := os.Chmod(tmpdir, 0444); err != nil {
- t.Fatalf("unexpected os.Chmod error: %v", err)
- }
- me, err := user.Current()
- if err != nil {
- // err can be non-nil when cross compiled
- // http://stackoverflow.com/questions/20609415/cross-compiling-user-current-not-implemented-on-linux-amd64
- t.Skipf("failed to get current user: %v", err)
- }
- if me.Name == "root" || runtime.GOOS == "windows" {
- // ideally we should check CAP_DAC_OVERRIDE.
- // but it does not matter for tests.
- // Chmod is not supported under windows.
- t.Skipf("running as a superuser or in windows")
- }
- if err := IsDirWriteable(tmpdir); err == nil {
- t.Fatalf("expected IsDirWriteable to error")
- }
-}
-
-func TestCreateDirAll(t *testing.T) {
- tmpdir := t.TempDir()
-
- tmpdir2 := filepath.Join(tmpdir, "testdir")
- if err := CreateDirAll(zaptest.NewLogger(t), tmpdir2); err != nil {
- t.Fatal(err)
- }
-
- if err := os.WriteFile(filepath.Join(tmpdir2, "text.txt"), []byte("test text"), PrivateFileMode); err != nil {
- t.Fatal(err)
- }
-
- if err := CreateDirAll(zaptest.NewLogger(t), tmpdir2); err == nil || !strings.Contains(err.Error(), "to be empty, got") {
- t.Fatalf("unexpected error %v", err)
- }
-}
-
-func TestExist(t *testing.T) {
- fdir := filepath.Join(os.TempDir(), fmt.Sprint(time.Now().UnixNano()+rand.Int63n(1000)))
- os.RemoveAll(fdir)
- if err := os.Mkdir(fdir, 0666); err != nil {
- t.Skip(err)
- }
- defer os.RemoveAll(fdir)
- if !Exist(fdir) {
- t.Fatalf("expected Exist true, got %v", Exist(fdir))
- }
-
- f, err := os.CreateTemp(os.TempDir(), "fileutil")
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
-
- if g := Exist(f.Name()); !g {
- t.Errorf("exist = %v, want true", g)
- }
-
- os.Remove(f.Name())
- if g := Exist(f.Name()); g {
- t.Errorf("exist = %v, want false", g)
- }
-}
-
-func TestDirEmpty(t *testing.T) {
- dir := t.TempDir()
-
- if !DirEmpty(dir) {
- t.Fatalf("expected DirEmpty true, got %v", DirEmpty(dir))
- }
-
- file, err := os.CreateTemp(dir, "new_file")
- if err != nil {
- t.Fatal(err)
- }
- file.Close()
-
- if DirEmpty(dir) {
- t.Fatalf("expected DirEmpty false, got %v", DirEmpty(dir))
- }
- if DirEmpty(file.Name()) {
- t.Fatalf("expected DirEmpty false, got %v", DirEmpty(file.Name()))
- }
-}
-
-func TestZeroToEnd(t *testing.T) {
- f, err := os.CreateTemp(os.TempDir(), "fileutil")
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(f.Name())
- defer f.Close()
-
- // Ensure 0 size is a nop so zero-to-end on an empty file won't give EINVAL.
- if err = ZeroToEnd(f); err != nil {
- t.Fatal(err)
- }
-
- b := make([]byte, 1024)
- for i := range b {
- b[i] = 12
- }
- if _, err = f.Write(b); err != nil {
- t.Fatal(err)
- }
- if _, err = f.Seek(512, io.SeekStart); err != nil {
- t.Fatal(err)
- }
- if err = ZeroToEnd(f); err != nil {
- t.Fatal(err)
- }
- off, serr := f.Seek(0, io.SeekCurrent)
- if serr != nil {
- t.Fatal(serr)
- }
- if off != 512 {
- t.Fatalf("expected offset 512, got %d", off)
- }
-
- b = make([]byte, 512)
- if _, err = f.Read(b); err != nil {
- t.Fatal(err)
- }
- for i := range b {
- if b[i] != 0 {
- t.Errorf("expected b[%d] = 0, got %d", i, b[i])
- }
- }
-}
-
-func TestDirPermission(t *testing.T) {
- tmpdir := t.TempDir()
-
- tmpdir2 := filepath.Join(tmpdir, "testpermission")
- // create a new dir with 0700
- if err := CreateDirAll(zaptest.NewLogger(t), tmpdir2); err != nil {
- t.Fatal(err)
- }
- // check dir permission with mode different than created dir
- if err := CheckDirPermission(tmpdir2, 0600); err == nil {
- t.Errorf("expected error, got nil")
- }
-}
-
-func TestRemoveMatchFile(t *testing.T) {
- tmpdir := t.TempDir()
- f, err := os.CreateTemp(tmpdir, "tmp")
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
- f, err = os.CreateTemp(tmpdir, "foo.tmp")
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
-
- err = RemoveMatchFile(zaptest.NewLogger(t), tmpdir, func(fileName string) bool {
- return strings.HasPrefix(fileName, "tmp")
- })
- if err != nil {
- t.Errorf("expected nil, got error")
- }
- fnames, err := ReadDir(tmpdir)
- if err != nil {
- t.Fatal(err)
- }
- if len(fnames) != 1 {
- t.Errorf("expected exist 1 files, got %d", len(fnames))
- }
-
- f, err = os.CreateTemp(tmpdir, "tmp")
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
- err = RemoveMatchFile(zaptest.NewLogger(t), tmpdir, func(fileName string) bool {
- os.Remove(filepath.Join(tmpdir, fileName))
- return strings.HasPrefix(fileName, "tmp")
- })
- if err == nil {
- t.Errorf("expected error, got nil")
- }
-}
-
-func TestTouchDirAll(t *testing.T) {
- tmpdir := t.TempDir()
- assert.Panics(t, func() {
- TouchDirAll(nil, tmpdir)
- }, "expected panic with nil log")
-
- if err := TouchDirAll(zaptest.NewLogger(t), tmpdir); err != nil {
- t.Fatal(err)
- }
-}
diff --git a/client/pkg/fileutil/lock.go b/client/pkg/fileutil/lock.go
deleted file mode 100644
index 338627f43c8..00000000000
--- a/client/pkg/fileutil/lock.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "errors"
- "os"
-)
-
-var (
- ErrLocked = errors.New("fileutil: file already locked")
-)
-
-type LockedFile struct{ *os.File }
diff --git a/client/pkg/fileutil/lock_linux_test.go b/client/pkg/fileutil/lock_linux_test.go
deleted file mode 100644
index 65dd96b91f5..00000000000
--- a/client/pkg/fileutil/lock_linux_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-
-package fileutil
-
-import "testing"
-
-// TestLockAndUnlockSyscallFlock tests the fallback flock using the flock syscall.
-func TestLockAndUnlockSyscallFlock(t *testing.T) {
- oldTryLock, oldLock := linuxTryLockFile, linuxLockFile
- defer func() {
- linuxTryLockFile, linuxLockFile = oldTryLock, oldLock
- }()
- linuxTryLockFile, linuxLockFile = flockTryLockFile, flockLockFile
- TestLockAndUnlock(t)
-}
diff --git a/client/pkg/fileutil/lock_test.go b/client/pkg/fileutil/lock_test.go
deleted file mode 100644
index b7f6fd5ce57..00000000000
--- a/client/pkg/fileutil/lock_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "os"
- "testing"
- "time"
-)
-
-func TestLockAndUnlock(t *testing.T) {
- f, err := os.CreateTemp("", "lock")
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
- defer func() {
- err = os.Remove(f.Name())
- if err != nil {
- t.Fatal(err)
- }
- }()
-
- // lock the file
- l, err := LockFile(f.Name(), os.O_WRONLY, PrivateFileMode)
- if err != nil {
- t.Fatal(err)
- }
-
- // try lock a locked file
- if _, err = TryLockFile(f.Name(), os.O_WRONLY, PrivateFileMode); err != ErrLocked {
- t.Fatal(err)
- }
-
- // unlock the file
- if err = l.Close(); err != nil {
- t.Fatal(err)
- }
-
- // try lock the unlocked file
- dupl, err := TryLockFile(f.Name(), os.O_WRONLY, PrivateFileMode)
- if err != nil {
- t.Errorf("err = %v, want %v", err, nil)
- }
-
- // blocking on locked file
- locked := make(chan struct{}, 1)
- go func() {
- bl, blerr := LockFile(f.Name(), os.O_WRONLY, PrivateFileMode)
- if blerr != nil {
- t.Error(blerr)
- }
- locked <- struct{}{}
- if blerr = bl.Close(); blerr != nil {
- t.Error(blerr)
- }
- }()
-
- select {
- case <-locked:
- t.Error("unexpected unblocking")
- case <-time.After(100 * time.Millisecond):
- }
-
- // unlock
- if err = dupl.Close(); err != nil {
- t.Fatal(err)
- }
-
- // the previously blocked routine should be unblocked
- select {
- case <-locked:
- case <-time.After(1 * time.Second):
- t.Error("unexpected blocking")
- }
-}
diff --git a/client/pkg/fileutil/lock_windows.go b/client/pkg/fileutil/lock_windows.go
deleted file mode 100644
index 51010bdf81c..00000000000
--- a/client/pkg/fileutil/lock_windows.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package fileutil
-
-import (
- "errors"
- "fmt"
- "os"
- "syscall"
-
- "golang.org/x/sys/windows"
-)
-
-var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file")
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := open(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := open(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := lockFile(windows.Handle(f.Fd()), 0); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func open(path string, flag int, perm os.FileMode) (*os.File, error) {
- if path == "" {
- return nil, errors.New("cannot open empty filename")
- }
- var access uint32
- switch flag {
- case syscall.O_RDONLY:
- access = syscall.GENERIC_READ
- case syscall.O_WRONLY:
- access = syscall.GENERIC_WRITE
- case syscall.O_RDWR:
- access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
- case syscall.O_WRONLY | syscall.O_CREAT:
- access = syscall.GENERIC_ALL
- default:
- panic(fmt.Errorf("flag %v is not supported", flag))
- }
- fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
- access,
- syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
- nil,
- syscall.OPEN_ALWAYS,
- syscall.FILE_ATTRIBUTE_NORMAL,
- 0)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func lockFile(fd windows.Handle, flags uint32) error {
- if fd == windows.InvalidHandle {
- return nil
- }
- err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{})
- if err == nil {
- return nil
- } else if err.Error() == errLocked.Error() {
- return ErrLocked
- } else if err != windows.ERROR_LOCK_VIOLATION {
- return err
- }
- return nil
-}
diff --git a/client/pkg/fileutil/preallocate.go b/client/pkg/fileutil/preallocate.go
deleted file mode 100644
index c747b7cf81f..00000000000
--- a/client/pkg/fileutil/preallocate.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "io"
- "os"
-)
-
-// Preallocate tries to allocate the space for the given
-// file. This operation is only supported on Linux by a
-// few filesystems (btrfs, ext4, etc.).
-// If the operation is unsupported, no error will be returned.
-// Otherwise, the error encountered will be returned.
-func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
- if sizeInBytes == 0 {
- // fallocate will return EINVAL if length is 0; skip
- return nil
- }
- if extendFile {
- return preallocExtend(f, sizeInBytes)
- }
- return preallocFixed(f, sizeInBytes)
-}
-
-func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
- curOff, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- size, err := f.Seek(sizeInBytes, io.SeekEnd)
- if err != nil {
- return err
- }
- if _, err = f.Seek(curOff, io.SeekStart); err != nil {
- return err
- }
- if sizeInBytes > size {
- return nil
- }
- return f.Truncate(sizeInBytes)
-}
diff --git a/client/pkg/fileutil/preallocate_test.go b/client/pkg/fileutil/preallocate_test.go
deleted file mode 100644
index 47a006704b2..00000000000
--- a/client/pkg/fileutil/preallocate_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "os"
- "testing"
-)
-
-func TestPreallocateExtend(t *testing.T) {
- pf := func(f *os.File, sz int64) error { return Preallocate(f, sz, true) }
- tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, pf) }
- runPreallocTest(t, tf)
-}
-
-func TestPreallocateExtendTrunc(t *testing.T) {
- tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, preallocExtendTrunc) }
- runPreallocTest(t, tf)
-}
-
-func testPreallocateExtend(t *testing.T, f *os.File, pf func(*os.File, int64) error) {
- size := int64(64 * 1000)
- if err := pf(f, size); err != nil {
- t.Fatal(err)
- }
-
- stat, err := f.Stat()
- if err != nil {
- t.Fatal(err)
- }
- if stat.Size() != size {
- t.Errorf("size = %d, want %d", stat.Size(), size)
- }
-}
-
-func TestPreallocateFixed(t *testing.T) { runPreallocTest(t, testPreallocateFixed) }
-func testPreallocateFixed(t *testing.T, f *os.File) {
- size := int64(64 * 1000)
- if err := Preallocate(f, size, false); err != nil {
- t.Fatal(err)
- }
-
- stat, err := f.Stat()
- if err != nil {
- t.Fatal(err)
- }
- if stat.Size() != 0 {
- t.Errorf("size = %d, want %d", stat.Size(), 0)
- }
-}
-
-func runPreallocTest(t *testing.T, test func(*testing.T, *os.File)) {
- p := t.TempDir()
-
- f, err := os.CreateTemp(p, "")
- if err != nil {
- t.Fatal(err)
- }
- test(t, f)
-}
diff --git a/client/pkg/fileutil/preallocate_unix.go b/client/pkg/fileutil/preallocate_unix.go
deleted file mode 100644
index b02070b30b3..00000000000
--- a/client/pkg/fileutil/preallocate_unix.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-func preallocExtend(f *os.File, sizeInBytes int64) error {
- // use mode = 0 to change size
- err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
- if err != nil {
- errno, ok := err.(syscall.Errno)
- // not supported; fallback
- // fallocate EINTRs frequently in some environments; fallback
- if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
- return preallocExtendTrunc(f, sizeInBytes)
- }
- }
- return err
-}
-
-func preallocFixed(f *os.File, sizeInBytes int64) error {
- // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
- err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
- if err != nil {
- errno, ok := err.(syscall.Errno)
- // treat not supported as nil error
- if ok && errno == syscall.ENOTSUP {
- return nil
- }
- }
- return err
-}
diff --git a/client/pkg/fileutil/purge_test.go b/client/pkg/fileutil/purge_test.go
deleted file mode 100644
index a10a3283be1..00000000000
--- a/client/pkg/fileutil/purge_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "reflect"
- "testing"
- "time"
-
- "go.uber.org/zap/zaptest"
-)
-
-func TestPurgeFile(t *testing.T) {
- dir := t.TempDir()
-
- // minimal file set
- for i := 0; i < 3; i++ {
- f, ferr := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", i)))
- if ferr != nil {
- t.Fatal(ferr)
- }
- f.Close()
- }
-
- stop, purgec := make(chan struct{}), make(chan string, 10)
-
- // keep 3 most recent files
- errch := purgeFile(zaptest.NewLogger(t), dir, "test", 3, time.Millisecond, stop, purgec, nil)
- select {
- case f := <-purgec:
- t.Errorf("unexpected purge on %q", f)
- case <-time.After(10 * time.Millisecond):
- }
-
- // rest of the files
- for i := 4; i < 10; i++ {
- go func(n int) {
- f, ferr := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", n)))
- if ferr != nil {
- t.Error(ferr)
- }
- f.Close()
- }(i)
- }
-
- // watch files purge away
- for i := 4; i < 10; i++ {
- select {
- case <-purgec:
- case <-time.After(time.Second):
- t.Errorf("purge took too long")
- }
- }
-
- fnames, rerr := ReadDir(dir)
- if rerr != nil {
- t.Fatal(rerr)
- }
- wnames := []string{"7.test", "8.test", "9.test"}
- if !reflect.DeepEqual(fnames, wnames) {
- t.Errorf("filenames = %v, want %v", fnames, wnames)
- }
-
- // no error should be reported from purge routine
- select {
- case f := <-purgec:
- t.Errorf("unexpected purge on %q", f)
- case err := <-errch:
- t.Errorf("unexpected purge error %v", err)
- case <-time.After(10 * time.Millisecond):
- }
- close(stop)
-}
-
-func TestPurgeFileHoldingLockFile(t *testing.T) {
- dir := t.TempDir()
-
- for i := 0; i < 10; i++ {
- var f *os.File
- f, err := os.Create(filepath.Join(dir, fmt.Sprintf("%d.test", i)))
- if err != nil {
- t.Fatal(err)
- }
- f.Close()
- }
-
- // create a purge barrier at 5
- p := filepath.Join(dir, fmt.Sprintf("%d.test", 5))
- l, err := LockFile(p, os.O_WRONLY, PrivateFileMode)
- if err != nil {
- t.Fatal(err)
- }
-
- stop, purgec := make(chan struct{}), make(chan string, 10)
- errch := purgeFile(zaptest.NewLogger(t), dir, "test", 3, time.Millisecond, stop, purgec, nil)
-
- for i := 0; i < 5; i++ {
- select {
- case <-purgec:
- case <-time.After(time.Second):
- t.Fatalf("purge took too long")
- }
- }
-
- fnames, rerr := ReadDir(dir)
- if rerr != nil {
- t.Fatal(rerr)
- }
-
- wnames := []string{"5.test", "6.test", "7.test", "8.test", "9.test"}
- if !reflect.DeepEqual(fnames, wnames) {
- t.Errorf("filenames = %v, want %v", fnames, wnames)
- }
-
- select {
- case s := <-purgec:
- t.Errorf("unexpected purge %q", s)
- case err = <-errch:
- t.Errorf("unexpected purge error %v", err)
- case <-time.After(10 * time.Millisecond):
- }
-
- // remove the purge barrier
- if err = l.Close(); err != nil {
- t.Fatal(err)
- }
-
- // wait for rest of purges (5, 6)
- for i := 0; i < 2; i++ {
- select {
- case <-purgec:
- case <-time.After(time.Second):
- t.Fatalf("purge took too long")
- }
- }
-
- fnames, rerr = ReadDir(dir)
- if rerr != nil {
- t.Fatal(rerr)
- }
- wnames = []string{"7.test", "8.test", "9.test"}
- if !reflect.DeepEqual(fnames, wnames) {
- t.Errorf("filenames = %v, want %v", fnames, wnames)
- }
-
- select {
- case f := <-purgec:
- t.Errorf("unexpected purge on %q", f)
- case err := <-errch:
- t.Errorf("unexpected purge error %v", err)
- case <-time.After(10 * time.Millisecond):
- }
-
- close(stop)
-}
diff --git a/client/pkg/fileutil/read_dir_test.go b/client/pkg/fileutil/read_dir_test.go
deleted file mode 100644
index 79a37d886ca..00000000000
--- a/client/pkg/fileutil/read_dir_test.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "os"
- "path/filepath"
- "reflect"
- "testing"
-)
-
-func TestReadDir(t *testing.T) {
- tmpdir := t.TempDir()
-
- files := []string{"def", "abc", "xyz", "ghi"}
- for _, f := range files {
- writeFunc(t, filepath.Join(tmpdir, f))
- }
- fs, err := ReadDir(tmpdir)
- if err != nil {
- t.Fatalf("error calling ReadDir: %v", err)
- }
- wfs := []string{"abc", "def", "ghi", "xyz"}
- if !reflect.DeepEqual(fs, wfs) {
- t.Fatalf("ReadDir: got %v, want %v", fs, wfs)
- }
-
- files = []string{"def.wal", "abc.wal", "xyz.wal", "ghi.wal"}
- for _, f := range files {
- writeFunc(t, filepath.Join(tmpdir, f))
- }
- fs, err = ReadDir(tmpdir, WithExt(".wal"))
- if err != nil {
- t.Fatalf("error calling ReadDir: %v", err)
- }
- wfs = []string{"abc.wal", "def.wal", "ghi.wal", "xyz.wal"}
- if !reflect.DeepEqual(fs, wfs) {
- t.Fatalf("ReadDir: got %v, want %v", fs, wfs)
- }
-}
-
-func writeFunc(t *testing.T, path string) {
- fh, err := os.Create(path)
- if err != nil {
- t.Fatalf("error creating file: %v", err)
- }
- if err = fh.Close(); err != nil {
- t.Fatalf("error closing file: %v", err)
- }
-}
diff --git a/client/pkg/go.mod b/client/pkg/go.mod
deleted file mode 100644
index d961c419937..00000000000
--- a/client/pkg/go.mod
+++ /dev/null
@@ -1,19 +0,0 @@
-module go.etcd.io/etcd/client/pkg/v3
-
-go 1.19
-
-require (
- github.com/coreos/go-systemd/v22 v22.5.0
- github.com/stretchr/testify v1.8.1
- go.uber.org/zap v1.24.0
- golang.org/x/sys v0.0.0-20210603125802-9665404d3644
-)
-
-require (
- github.com/benbjohnson/clock v1.1.0 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
-)
diff --git a/client/pkg/go.sum b/client/pkg/go.sum
deleted file mode 100644
index bd0107f5d70..00000000000
--- a/client/pkg/go.sum
+++ /dev/null
@@ -1,33 +0,0 @@
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/client/pkg/logutil/log_format.go b/client/pkg/logutil/log_format.go
deleted file mode 100644
index 494ab33fb97..00000000000
--- a/client/pkg/logutil/log_format.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import "fmt"
-
-const (
- JsonLogFormat = "json"
- ConsoleLogFormat = "console"
-)
-
-var DefaultLogFormat = JsonLogFormat
-
-// ConvertToZapFormat converts and validates a log format string.
-func ConvertToZapFormat(format string) (string, error) {
- switch format {
- case ConsoleLogFormat:
- return ConsoleLogFormat, nil
- case JsonLogFormat:
- return JsonLogFormat, nil
- case "":
- return DefaultLogFormat, nil
- default:
- return "", fmt.Errorf("unknown log format: %s, supported values json, console", format)
- }
-}
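
The removed ConvertToZapFormat above treats an empty string as the JSON default and rejects anything other than "json" or "console". For reference, a minimal standalone sketch of the same validation logic (the lowercase identifiers below are illustrative stand-ins, not part of this patch):

    package main

    import (
        "fmt"
        "log"
    )

    const (
        jsonLogFormat    = "json"
        consoleLogFormat = "console"
    )

    // convertToZapFormat mirrors the removed logutil.ConvertToZapFormat:
    // empty input falls back to the JSON default, unknown values error out.
    func convertToZapFormat(format string) (string, error) {
        switch format {
        case consoleLogFormat:
            return consoleLogFormat, nil
        case jsonLogFormat, "":
            return jsonLogFormat, nil
        default:
            return "", fmt.Errorf("unknown log format: %s, supported values json, console", format)
        }
    }

    func main() {
        for _, f := range []string{"", "json", "console", "konsole"} {
            got, err := convertToZapFormat(f)
            if err != nil {
                log.Printf("%q rejected: %v", f, err)
                continue
            }
            fmt.Printf("%q -> %q\n", f, got)
        }
    }
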
diff --git a/client/pkg/logutil/log_format_test.go b/client/pkg/logutil/log_format_test.go
deleted file mode 100644
index 3c17061db7e..00000000000
--- a/client/pkg/logutil/log_format_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "testing"
-)
-
-func TestLogFormat(t *testing.T) {
- tests := []struct {
- given string
- want string
- errExpected bool
- }{
- {"json", JsonLogFormat, false},
- {"console", ConsoleLogFormat, false},
- {"", JsonLogFormat, false},
- {"konsole", "", true},
- }
-
- for i, tt := range tests {
- got, err := ConvertToZapFormat(tt.given)
- if got != tt.want {
- t.Errorf("#%d: ConvertToZapFormat failure: want=%v, got=%v", i, tt.want, got)
- }
-
- if err != nil {
- if !tt.errExpected {
- t.Errorf("#%d: ConvertToZapFormat unexpected error: %v", i, err)
- }
- }
- }
-}
diff --git a/client/pkg/logutil/zap_journal_test.go b/client/pkg/logutil/zap_journal_test.go
deleted file mode 100644
index be5efd5d3ec..00000000000
--- a/client/pkg/logutil/zap_journal_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-
-package logutil
-
-import (
- "bytes"
- "testing"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-func TestNewJournalWriter(t *testing.T) {
- buf := bytes.NewBuffer(nil)
- jw, err := NewJournalWriter(buf)
- if err != nil {
- t.Skip(err)
- }
-
- syncer := zapcore.AddSync(jw)
-
- cr := zapcore.NewCore(
- zapcore.NewJSONEncoder(DefaultZapLoggerConfig.EncoderConfig),
- syncer,
- zap.NewAtomicLevelAt(zap.InfoLevel),
- )
-
- lg := zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer))
- defer lg.Sync()
-
- lg.Info("TestNewJournalWriter")
- if buf.String() == "" {
- // check with "journalctl -f"
- t.Log("sent logs successfully to journald")
- }
-}
diff --git a/client/pkg/pathutil/path.go b/client/pkg/pathutil/path.go
deleted file mode 100644
index f26254ba933..00000000000
--- a/client/pkg/pathutil/path.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pathutil implements utility functions for handling slash-separated
-// paths.
-package pathutil
-
-import "path"
-
-// CanonicalURLPath returns the canonical url path for p, which follows the rules:
-// 1. the path always starts with "/"
-// 2. replace multiple slashes with a single slash
-// 3. replace each '.' and '..' path name element with its equivalent
-// 4. keep the trailing slash
-// The function is borrowed from stdlib http.cleanPath in server.go.
-func CanonicalURLPath(p string) string {
- if p == "" {
- return "/"
- }
- if p[0] != '/' {
- p = "/" + p
- }
- np := path.Clean(p)
- // path.Clean removes trailing slash except for root,
- // put the trailing slash back if necessary.
- if p[len(p)-1] == '/' && np != "/" {
- np += "/"
- }
- return np
-}
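
The removed CanonicalURLPath wraps path.Clean with two extra rules: force a leading slash and preserve a trailing slash. A short self-contained sketch of that behavior, reproduced from the deleted code for illustration:

    package main

    import (
        "fmt"
        "path"
    )

    // canonicalURLPath reproduces the behavior of the removed
    // pathutil.CanonicalURLPath: force a leading slash, clean the
    // path, and put back a trailing slash that path.Clean strips.
    func canonicalURLPath(p string) string {
        if p == "" {
            return "/"
        }
        if p[0] != '/' {
            p = "/" + p
        }
        np := path.Clean(p)
        if p[len(p)-1] == '/' && np != "/" {
            np += "/"
        }
        return np
    }

    func main() {
        for _, p := range []string{"", "a", "//a", "/a/..", "/a//"} {
            fmt.Printf("%q -> %q\n", p, canonicalURLPath(p))
        }
    }
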
diff --git a/client/pkg/pathutil/path_test.go b/client/pkg/pathutil/path_test.go
deleted file mode 100644
index 209fdc93c13..00000000000
--- a/client/pkg/pathutil/path_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pathutil
-
-import "testing"
-
-func TestCanonicalURLPath(t *testing.T) {
- tests := []struct {
- p string
- wp string
- }{
- {"/a", "/a"},
- {"", "/"},
- {"a", "/a"},
- {"//a", "/a"},
- {"/a/.", "/a"},
- {"/a/..", "/"},
- {"/a/", "/a/"},
- {"/a//", "/a/"},
- }
- for i, tt := range tests {
- if g := CanonicalURLPath(tt.p); g != tt.wp {
- t.Errorf("#%d: canonical path = %s, want %s", i, g, tt.wp)
- }
- }
-}
diff --git a/client/pkg/srv/srv_test.go b/client/pkg/srv/srv_test.go
deleted file mode 100644
index a61938fec89..00000000000
--- a/client/pkg/srv/srv_test.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package srv
-
-import (
- "errors"
- "fmt"
- "net"
- "reflect"
- "strings"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func notFoundErr(service, proto, domain string) error {
- name := fmt.Sprintf("_%s._%s.%s", service, proto, domain)
- return &net.DNSError{Err: "no such host", Name: name, Server: "10.0.0.53:53", IsTimeout: false, IsTemporary: false, IsNotFound: true}
-}
-
-func TestSRVGetCluster(t *testing.T) {
- defer func() {
- lookupSRV = net.LookupSRV
- resolveTCPAddr = net.ResolveTCPAddr
- }()
-
- hasErr := func(err error) bool {
- return err != nil
- }
-
- name := "dnsClusterTest"
- dns := map[string]string{
- "1.example.com.:2480": "10.0.0.1:2480",
- "2.example.com.:2480": "10.0.0.2:2480",
- "3.example.com.:2480": "10.0.0.3:2480",
- "4.example.com.:2380": "10.0.0.3:2380",
- }
- srvAll := []*net.SRV{
- {Target: "1.example.com.", Port: 2480},
- {Target: "2.example.com.", Port: 2480},
- {Target: "3.example.com.", Port: 2480},
- }
- var srvNone []*net.SRV
-
- tests := []struct {
- service string
- scheme string
- withSSL []*net.SRV
- withoutSSL []*net.SRV
- urls []string
- expected string
- werr bool
- }{
- {
- "etcd-server-ssl",
- "https",
- srvNone,
- srvNone,
- nil,
- "",
- true,
- },
- {
- "etcd-server-ssl",
- "https",
- srvAll,
- srvNone,
- nil,
- "0=https://1.example.com:2480,1=https://2.example.com:2480,2=https://3.example.com:2480",
- false,
- },
- {
- "etcd-server",
- "http",
- srvNone,
- srvAll,
- nil,
- "0=http://1.example.com:2480,1=http://2.example.com:2480,2=http://3.example.com:2480",
- false,
- },
- {
- "etcd-server-ssl",
- "https",
- srvAll,
- srvNone,
- []string{"https://10.0.0.1:2480"},
- "dnsClusterTest=https://1.example.com:2480,0=https://2.example.com:2480,1=https://3.example.com:2480",
- false,
- },
- // matching local member with resolved addr and return unresolved hostnames
- {
- "etcd-server-ssl",
- "https",
- srvAll,
- srvNone,
- []string{"https://10.0.0.1:2480"},
- "dnsClusterTest=https://1.example.com:2480,0=https://2.example.com:2480,1=https://3.example.com:2480",
- false,
- },
- // reject if apurls are TLS but SRV is only http
- {
- "etcd-server",
- "http",
- srvNone,
- srvAll,
- []string{"https://10.0.0.1:2480"},
- "0=http://2.example.com:2480,1=http://3.example.com:2480",
- false,
- },
- }
-
- resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) {
- if strings.Contains(addr, "10.0.0.") {
- // accept IP addresses when resolving apurls
- return net.ResolveTCPAddr(network, addr)
- }
- if dns[addr] == "" {
- return nil, errors.New("missing dns record")
- }
- return net.ResolveTCPAddr(network, dns[addr])
- }
-
- for i, tt := range tests {
- lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) {
- if service == "etcd-server-ssl" {
- if len(tt.withSSL) > 0 {
- return "", tt.withSSL, nil
- }
- return "", nil, notFoundErr(service, proto, domain)
- }
- if service == "etcd-server" {
- if len(tt.withoutSSL) > 0 {
- return "", tt.withoutSSL, nil
- }
- return "", nil, notFoundErr(service, proto, domain)
- }
- return "", nil, errors.New("unknown service in mock")
- }
-
- urls := testutil.MustNewURLs(t, tt.urls)
- str, err := GetCluster(tt.scheme, tt.service, name, "example.com", urls)
-
- if hasErr(err) != tt.werr {
- t.Fatalf("%d: err = %#v, want = %#v", i, err, tt.werr)
- }
- if strings.Join(str, ",") != tt.expected {
- t.Errorf("#%d: cluster = %s, want %s", i, str, tt.expected)
- }
- }
-}
-
-func TestSRVDiscover(t *testing.T) {
- defer func() { lookupSRV = net.LookupSRV }()
-
- hasErr := func(err error) bool {
- return err != nil
- }
-
- tests := []struct {
- withSSL []*net.SRV
- withoutSSL []*net.SRV
- expected []string
- werr bool
- }{
- {
- []*net.SRV{},
- []*net.SRV{},
- []string{},
- true,
- },
- {
- []*net.SRV{},
- []*net.SRV{
- {Target: "10.0.0.1", Port: 2480},
- {Target: "10.0.0.2", Port: 2480},
- {Target: "10.0.0.3", Port: 2480},
- },
- []string{"http://10.0.0.1:2480", "http://10.0.0.2:2480", "http://10.0.0.3:2480"},
- false,
- },
- {
- []*net.SRV{
- {Target: "10.0.0.1", Port: 2480},
- {Target: "10.0.0.2", Port: 2480},
- {Target: "10.0.0.3", Port: 2480},
- },
- []*net.SRV{},
- []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"},
- false,
- },
- {
- []*net.SRV{
- {Target: "10.0.0.1", Port: 2480},
- {Target: "10.0.0.2", Port: 2480},
- {Target: "10.0.0.3", Port: 2480},
- },
- []*net.SRV{
- {Target: "10.0.0.1", Port: 7001},
- },
- []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"},
- false,
- },
- {
- []*net.SRV{
- {Target: "10.0.0.1", Port: 2480},
- {Target: "10.0.0.2", Port: 2480},
- {Target: "10.0.0.3", Port: 2480},
- },
- []*net.SRV{
- {Target: "10.0.0.1", Port: 7001},
- },
- []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"},
- false,
- },
- {
- []*net.SRV{
- {Target: "a.example.com", Port: 2480},
- {Target: "b.example.com", Port: 2480},
- {Target: "c.example.com.", Port: 2480},
- },
- []*net.SRV{},
- []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com.:2480"},
- false,
- },
- }
-
- for i, tt := range tests {
- lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) {
- if service == "etcd-client-ssl" {
- if len(tt.withSSL) > 0 {
- return "", tt.withSSL, nil
- }
- return "", nil, notFoundErr(service, proto, domain)
- }
- if service == "etcd-client" {
- if len(tt.withoutSSL) > 0 {
- return "", tt.withoutSSL, nil
- }
- return "", nil, notFoundErr(service, proto, domain)
- }
- return "", nil, errors.New("unknown service in mock")
- }
-
- srvs, err := GetClient("etcd-client", "example.com", "")
-
- if hasErr(err) != tt.werr {
- t.Fatalf("%d: err = %#v, want = %#v", i, err, tt.werr)
- }
- if srvs == nil {
- if len(tt.expected) > 0 {
- t.Errorf("#%d: srvs = nil, want non-nil", i)
- }
- } else {
- if !reflect.DeepEqual(srvs.Endpoints, tt.expected) {
- t.Errorf("#%d: endpoints = %v, want = %v", i, srvs.Endpoints, tt.expected)
- }
- }
- }
-}
-
-func TestGetSRVService(t *testing.T) {
- tests := []struct {
- scheme string
- serviceName string
-
- expected string
- }{
- {
- "https",
- "",
- "etcd-client-ssl",
- },
- {
- "http",
- "",
- "etcd-client",
- },
- {
- "https",
- "foo",
- "etcd-client-ssl-foo",
- },
- {
- "http",
- "bar",
- "etcd-client-bar",
- },
- }
-
- for i, tt := range tests {
- service := GetSRVService("etcd-client", tt.serviceName, tt.scheme)
- if strings.Compare(service, tt.expected) != 0 {
- t.Errorf("#%d: service = %s, want %s", i, service, tt.expected)
- }
- }
-}
diff --git a/client/pkg/testutil/assert.go b/client/pkg/testutil/assert.go
deleted file mode 100644
index ef820748e64..00000000000
--- a/client/pkg/testutil/assert.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "reflect"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func copyToInterface(msg ...string) []interface{} {
- newMsg := make([]interface{}, len(msg))
- for i, v := range msg {
- newMsg[i] = v
- }
- return newMsg
-}
-
-func AssertNil(t *testing.T, v interface{}) {
- t.Helper()
- assert.Nil(t, v)
-}
-
-func AssertNotNil(t *testing.T, v interface{}) {
- t.Helper()
- if v == nil {
- t.Fatalf("expected non-nil, got %+v", v)
- }
-}
-
-func AssertTrue(t *testing.T, v bool, msg ...string) {
- t.Helper()
- newMsg := copyToInterface(msg...)
- assert.Equal(t, true, v, newMsg)
-}
-
-func AssertFalse(t *testing.T, v bool, msg ...string) {
- t.Helper()
- newMsg := copyToInterface(msg...)
- assert.Equal(t, false, v, newMsg)
-}
-
-func isNil(v interface{}) bool {
- if v == nil {
- return true
- }
- rv := reflect.ValueOf(v)
- return rv.Kind() != reflect.Struct && rv.IsNil()
-}
diff --git a/client/pkg/testutil/before.go b/client/pkg/testutil/before.go
deleted file mode 100644
index 1f8c1fa72a5..00000000000
--- a/client/pkg/testutil/before.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "log"
- "os"
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "go.etcd.io/etcd/client/pkg/v3/verify"
-)
-
-func BeforeTest(t testing.TB) {
- RegisterLeakDetection(t)
-
- revertVerifyFunc := verify.EnableAllVerifications()
-
- path, err := os.Getwd()
- assert.NoError(t, err)
- tempDir := t.TempDir()
- assert.NoError(t, os.Chdir(tempDir))
- t.Logf("Changing working directory to: %s", tempDir)
-
- t.Cleanup(func() {
- revertVerifyFunc()
- assert.NoError(t, os.Chdir(path))
- })
-}
-
-func BeforeIntegrationExamples(*testing.M) func() {
- ExitInShortMode("Skipping: the tests require real cluster")
-
- tempDir, err := os.MkdirTemp(os.TempDir(), "etcd-integration")
- if err != nil {
- log.Printf("Failed to obtain tempDir: %v", tempDir)
- os.Exit(1)
- }
-
- err = os.Chdir(tempDir)
- if err != nil {
- log.Printf("Failed to change working dir to: %s: %v", tempDir, err)
- os.Exit(1)
- }
- log.Printf("Running tests (examples) in dir(%v): ...", tempDir)
- return func() { os.RemoveAll(tempDir) }
-}
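
The removed BeforeTest helper above combines leak detection, verification toggles, and a per-test working directory. A trimmed sketch of just the working-directory part, using only the standard testing package (beforeTest and TestUsesTempDir are hypothetical names for illustration):

    package example

    import (
        "os"
        "testing"
    )

    // beforeTest switches each test into its own temp dir and restores
    // the original dir on cleanup, as the removed testutil.BeforeTest did
    // (leak detection and verify toggles omitted here).
    func beforeTest(t testing.TB) {
        t.Helper()

        orig, err := os.Getwd()
        if err != nil {
            t.Fatalf("getwd: %v", err)
        }
        tempDir := t.TempDir()
        if err := os.Chdir(tempDir); err != nil {
            t.Fatalf("chdir: %v", err)
        }
        t.Logf("Changing working directory to: %s", tempDir)

        t.Cleanup(func() {
            if err := os.Chdir(orig); err != nil {
                t.Errorf("restore working directory: %v", err)
            }
        })
    }

    func TestUsesTempDir(t *testing.T) {
        beforeTest(t)
        // Files created with relative paths now land in the per-test temp dir.
        if err := os.WriteFile("scratch.txt", []byte("ok"), 0o600); err != nil {
            t.Fatal(err)
        }
    }
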
diff --git a/client/pkg/testutil/leak_test.go b/client/pkg/testutil/leak_test.go
deleted file mode 100644
index 71b1c7bf3e6..00000000000
--- a/client/pkg/testutil/leak_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package testutil
-
-import (
- "fmt"
- "os"
- "testing"
-)
-
-// so tests pass if given a -run that doesn't include TestSample
-var ranSample = false
-
-func TestMain(m *testing.M) {
- m.Run()
- isLeaked := CheckLeakedGoroutine()
- if ranSample && !isLeaked {
- fmt.Fprintln(os.Stderr, "expected leaky goroutines but none is detected")
- os.Exit(1)
- }
- os.Exit(0)
-}
-
-func TestSample(t *testing.T) {
- SkipTestIfShortMode(t, "Counting leaked routines is disabled in --short tests")
- defer afterTest(t)
- ranSample = true
- for range make([]struct{}, 100) {
- go func() {
- select {}
- }()
- }
-}
diff --git a/client/pkg/tlsutil/cipher_suites.go b/client/pkg/tlsutil/cipher_suites.go
deleted file mode 100644
index e1f21755d4b..00000000000
--- a/client/pkg/tlsutil/cipher_suites.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlsutil
-
-import (
- "crypto/tls"
- "fmt"
-)
-
-// GetCipherSuite returns the corresponding cipher suite ID
-// and a boolean indicating whether it is supported.
-func GetCipherSuite(s string) (uint16, bool) {
- for _, c := range tls.CipherSuites() {
- if s == c.Name {
- return c.ID, true
- }
- }
- for _, c := range tls.InsecureCipherSuites() {
- if s == c.Name {
- return c.ID, true
- }
- }
- switch s {
- case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":
- return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true
- case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":
- return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true
- }
- return 0, false
-}
-
-// GetCipherSuites returns the list of corresponding cipher suite IDs.
-func GetCipherSuites(ss []string) ([]uint16, error) {
- cs := make([]uint16, len(ss))
- for i, s := range ss {
- var ok bool
- cs[i], ok = GetCipherSuite(s)
- if !ok {
- return nil, fmt.Errorf("unexpected TLS cipher suite %q", s)
- }
- }
-
- return cs, nil
-}
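
The removed GetCipherSuite resolves suite names against crypto/tls, with two extra aliases for the legacy ChaCha20 names. A minimal sketch of the same lookup using only the standard library (cipherSuiteID is an illustrative name; the legacy aliases are omitted):

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
    )

    // cipherSuiteID looks up a cipher suite ID by name, the same approach
    // the removed tlsutil.GetCipherSuite takes: check the supported suites
    // first, then the insecure ones.
    func cipherSuiteID(name string) (uint16, bool) {
        for _, c := range tls.CipherSuites() {
            if c.Name == name {
                return c.ID, true
            }
        }
        for _, c := range tls.InsecureCipherSuites() {
            if c.Name == name {
                return c.ID, true
            }
        }
        return 0, false
    }

    func main() {
        names := []string{
            "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
            "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
        }
        for _, n := range names {
            id, ok := cipherSuiteID(n)
            if !ok {
                log.Fatalf("unexpected TLS cipher suite %q", n)
            }
            fmt.Printf("%s = 0x%04x\n", n, id)
        }
    }
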
diff --git a/client/pkg/tlsutil/cipher_suites_test.go b/client/pkg/tlsutil/cipher_suites_test.go
deleted file mode 100644
index a17b46c2fee..00000000000
--- a/client/pkg/tlsutil/cipher_suites_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlsutil
-
-import (
- "crypto/tls"
- "testing"
-)
-
-func TestGetCipherSuite_not_existing(t *testing.T) {
- _, ok := GetCipherSuite("not_existing")
- if ok {
- t.Fatal("Expected not ok")
- }
-}
-
-func CipherSuiteExpectedToExist(tb testing.TB, cipher string, expectedId uint16) {
- vid, ok := GetCipherSuite(cipher)
- if !ok {
- tb.Errorf("Expected %v cipher to exist", cipher)
- }
- if vid != expectedId {
- tb.Errorf("For %v expected=%v found=%v", cipher, expectedId, vid)
- }
-}
-
-func TestGetCipherSuite_success(t *testing.T) {
- CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA)
- CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256)
-
- // Explicit test for legacy names
- CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256)
- CipherSuiteExpectedToExist(t, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256)
-}
-
-func TestGetCipherSuite_insecure(t *testing.T) {
- CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA)
-}
diff --git a/client/pkg/tlsutil/versions.go b/client/pkg/tlsutil/versions.go
deleted file mode 100644
index ffcecd8c670..00000000000
--- a/client/pkg/tlsutil/versions.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2023 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlsutil
-
-import (
- "crypto/tls"
- "fmt"
-)
-
-type TLSVersion string
-
-// Constants for TLS versions.
-const (
- TLSVersionDefault TLSVersion = ""
- TLSVersion12 TLSVersion = "TLS1.2"
- TLSVersion13 TLSVersion = "TLS1.3"
-)
-
-// GetTLSVersion returns the corresponding TLS version constant or an error.
-func GetTLSVersion(version string) (uint16, error) {
- var v uint16
-
- switch version {
- case string(TLSVersionDefault):
- v = 0 // 0 means let Go decide.
- case string(TLSVersion12):
- v = tls.VersionTLS12
- case string(TLSVersion13):
- v = tls.VersionTLS13
- default:
- return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version)
- }
-
- return v, nil
-}
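
The removed GetTLSVersion maps the flag values "TLS1.2" and "TLS1.3" to crypto/tls constants, with the empty string meaning "let Go decide". A small standalone sketch of that mapping and how the result would feed a tls.Config (tlsVersion is an illustrative name):

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
    )

    // tlsVersion mirrors the removed tlsutil.GetTLSVersion: empty means 0
    // (Go picks the version), anything else must be TLS1.2 or TLS1.3.
    func tlsVersion(version string) (uint16, error) {
        switch version {
        case "":
            return 0, nil
        case "TLS1.2":
            return tls.VersionTLS12, nil
        case "TLS1.3":
            return tls.VersionTLS13, nil
        default:
            return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version)
        }
    }

    func main() {
        v, err := tlsVersion("TLS1.3")
        if err != nil {
            log.Fatal(err)
        }
        cfg := &tls.Config{MinVersion: v}
        fmt.Printf("MinVersion set to 0x%04x\n", cfg.MinVersion)
    }
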
diff --git a/client/pkg/tlsutil/versions_test.go b/client/pkg/tlsutil/versions_test.go
deleted file mode 100644
index 89c7c3f64b7..00000000000
--- a/client/pkg/tlsutil/versions_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2023 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlsutil
-
-import (
- "crypto/tls"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestGetVersion(t *testing.T) {
- tests := []struct {
- name string
- version string
- want uint16
- expectError bool
- }{
- {
- name: "TLS1.2",
- version: "TLS1.2",
- want: tls.VersionTLS12,
- },
- {
- name: "TLS1.3",
- version: "TLS1.3",
- want: tls.VersionTLS13,
- },
- {
- name: "Empty version",
- version: "",
- want: 0,
- },
- {
- name: "Converting invalid version string to TLS version",
- version: "not_existing",
- expectError: true,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := GetTLSVersion(tt.version)
- if err != nil {
- assert.True(t, tt.expectError, "GetTLSVersion() returned error while expecting success: %v", err)
- return
- }
- assert.Equal(t, tt.want, got)
- })
- }
-}
diff --git a/client/pkg/transport/keepalive_listener.go b/client/pkg/transport/keepalive_listener.go
deleted file mode 100644
index 2006a56b7df..00000000000
--- a/client/pkg/transport/keepalive_listener.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "time"
-)
-
-// NewKeepAliveListener returns a listener that enables TCP keepalive on accepted connections.
-// Be careful when wrapping a KeepAliveListener with another Listener if TLSInfo is not nil.
-// Some packages (like net/http) might expect the Listener to return a *tls.Conn to start the TLS handshake.
-// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
-//
-// Note(ahrtr):
-// only `net.TCPConn` supports `SetKeepAlive` and `SetKeepAlivePeriod`
-// by default, so if you want to wrap multiple layers of net.Listener,
-// the `keepaliveListener` should be the one which is closest to the
-// original `net.Listener` implementation, namely `TCPListener`.
-func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
- kal := &keepaliveListener{
- Listener: l,
- }
-
- if scheme == "https" {
- if tlscfg == nil {
- return nil, errors.New("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
- }
- return newTLSKeepaliveListener(kal, tlscfg), nil
- }
-
- return kal, nil
-}
-
-type keepaliveListener struct{ net.Listener }
-
-func (kln *keepaliveListener) Accept() (net.Conn, error) {
- c, err := kln.Listener.Accept()
- if err != nil {
- return nil, err
- }
-
- kac, err := createKeepaliveConn(c)
- if err != nil {
- return nil, fmt.Errorf("create keepalive connection failed, %w", err)
- }
- // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
- // default on linux: 30 + 8 * 30
- // default on osx: 30 + 8 * 75
- if err := kac.SetKeepAlive(true); err != nil {
- return nil, fmt.Errorf("SetKeepAlive failed, %w", err)
- }
- if err := kac.SetKeepAlivePeriod(30 * time.Second); err != nil {
- return nil, fmt.Errorf("SetKeepAlivePeriod failed, %w", err)
- }
- return kac, nil
-}
-
-func createKeepaliveConn(c net.Conn) (*keepAliveConn, error) {
- tcpc, ok := c.(*net.TCPConn)
- if !ok {
- return nil, ErrNotTCP
- }
- return &keepAliveConn{tcpc}, nil
-}
-
-type keepAliveConn struct {
- *net.TCPConn
-}
-
-// SetKeepAlive sets keepalive
-func (l *keepAliveConn) SetKeepAlive(doKeepAlive bool) error {
- return l.TCPConn.SetKeepAlive(doKeepAlive)
-}
-
-// SetKeepAlivePeriod sets keepalive period
-func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error {
- return l.TCPConn.SetKeepAlivePeriod(d)
-}
-
-// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
-type tlsKeepaliveListener struct {
- net.Listener
- config *tls.Config
-}
-
-// Accept waits for and returns the next incoming TLS connection.
-// The returned connection c is a *tls.Conn.
-func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
- c, err = l.Listener.Accept()
- if err != nil {
- return
- }
-
- c = tls.Server(c, l.config)
- return c, nil
-}
-
-// NewListener creates a Listener which accepts connections from an inner
-// Listener and wraps each connection with Server.
-// The configuration config must be non-nil and must have
-// at least one certificate.
-func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
- l := &tlsKeepaliveListener{}
- l.Listener = inner
- l.config = config
- return l
-}
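
The removed keepaliveListener relies on the fact that only *net.TCPConn exposes SetKeepAlive and SetKeepAlivePeriod, which is why it must sit closest to the original TCPListener. A self-contained sketch of that pattern on a plain TCP listener (the 30-second period mirrors the deleted code; the program itself is illustrative):

    package main

    import (
        "fmt"
        "log"
        "net"
        "time"
    )

    func main() {
        ln, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            log.Fatal(err)
        }
        defer ln.Close()

        // Dial ourselves so Accept below has something to return.
        go func() {
            c, err := net.Dial("tcp", ln.Addr().String())
            if err != nil {
                return
            }
            time.Sleep(100 * time.Millisecond)
            c.Close()
        }()

        conn, err := ln.Accept()
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // Only *net.TCPConn supports the keepalive setters.
        tcpc, ok := conn.(*net.TCPConn)
        if !ok {
            log.Fatal("expected a *net.TCPConn")
        }
        if err := tcpc.SetKeepAlive(true); err != nil {
            log.Fatal(err)
        }
        if err := tcpc.SetKeepAlivePeriod(30 * time.Second); err != nil {
            log.Fatal(err)
        }
        fmt.Println("keepalive enabled on connection from", conn.RemoteAddr())
    }
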
diff --git a/client/pkg/transport/keepalive_listener_test.go b/client/pkg/transport/keepalive_listener_test.go
deleted file mode 100644
index efe312d94a8..00000000000
--- a/client/pkg/transport/keepalive_listener_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "crypto/tls"
- "net"
- "net/http"
- "testing"
-)
-
-// TestNewKeepAliveListener tests NewKeepAliveListener returns a listener
-// that accepts connections.
-// TODO: verify the keepalive option is set correctly
-func TestNewKeepAliveListener(t *testing.T) {
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
-
- ln, err = NewKeepAliveListener(ln, "http", nil)
- if err != nil {
- t.Fatalf("unexpected NewKeepAliveListener error: %v", err)
- }
-
- go http.Get("http://" + ln.Addr().String())
- conn, err := ln.Accept()
- if err != nil {
- t.Fatalf("unexpected Accept error: %v", err)
- }
- if _, ok := conn.(*keepAliveConn); !ok {
- t.Fatalf("Unexpected conn type: %T, wanted *keepAliveConn", conn)
- }
- conn.Close()
- ln.Close()
-
- ln, err = net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected Listen error: %v", err)
- }
-
- // tls
- tlsinfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create tmpfile: %v", err)
- }
- tlsInfo := TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile}
- tlsInfo.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
- tlscfg, err := tlsInfo.ServerConfig()
- if err != nil {
- t.Fatalf("unexpected serverConfig error: %v", err)
- }
- tlsln, err := NewKeepAliveListener(ln, "https", tlscfg)
- if err != nil {
- t.Fatalf("unexpected NewKeepAliveListener error: %v", err)
- }
-
- go http.Get("https://" + tlsln.Addr().String())
- conn, err = tlsln.Accept()
- if err != nil {
- t.Fatalf("unexpected Accept error: %v", err)
- }
- if _, ok := conn.(*tls.Conn); !ok {
- t.Errorf("failed to accept *tls.Conn")
- }
- conn.Close()
- tlsln.Close()
-}
-
-func TestNewKeepAliveListenerTLSEmptyConfig(t *testing.T) {
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
-
- _, err = NewKeepAliveListener(ln, "https", nil)
- if err == nil {
- t.Errorf("err = nil, want not presented error")
- }
-}
diff --git a/client/pkg/transport/listener.go b/client/pkg/transport/listener.go
deleted file mode 100644
index 5e0e13e25a7..00000000000
--- a/client/pkg/transport/listener.go
+++ /dev/null
@@ -1,596 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "context"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "errors"
- "fmt"
- "math/big"
- "net"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/client/pkg/v3/tlsutil"
- "go.etcd.io/etcd/client/pkg/v3/verify"
-
- "go.uber.org/zap"
-)
-
-// NewListener creates a new listener.
-func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
- return newListener(addr, scheme, WithTLSInfo(tlsinfo))
-}
-
-// NewListenerWithOpts creates a new listener which accepts listener options.
-func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
- return newListener(addr, scheme, opts...)
-}
-
-func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
- if scheme == "unix" || scheme == "unixs" {
- // unix sockets via unix://laddr
- return NewUnixListener(addr)
- }
-
- lnOpts := newListenOpts(opts...)
-
- switch {
- case lnOpts.IsSocketOpts():
- // new ListenConfig with socket options.
- config, err := newListenConfig(lnOpts.socketOpts)
- if err != nil {
- return nil, err
- }
- lnOpts.ListenConfig = config
- // check for timeout
- fallthrough
- case lnOpts.IsTimeout(), lnOpts.IsSocketOpts():
- // timeout listener with socket options.
- ln, err := newKeepAliveListener(&lnOpts.ListenConfig, addr)
- if err != nil {
- return nil, err
- }
- lnOpts.Listener = &rwTimeoutListener{
- Listener: ln,
- readTimeout: lnOpts.readTimeout,
- writeTimeout: lnOpts.writeTimeout,
- }
- case lnOpts.IsTimeout():
- ln, err := newKeepAliveListener(nil, addr)
- if err != nil {
- return nil, err
- }
- lnOpts.Listener = &rwTimeoutListener{
- Listener: ln,
- readTimeout: lnOpts.readTimeout,
- writeTimeout: lnOpts.writeTimeout,
- }
- default:
- ln, err := newKeepAliveListener(nil, addr)
- if err != nil {
- return nil, err
- }
- lnOpts.Listener = ln
- }
-
- // only skip if not passing TLSInfo
- if lnOpts.skipTLSInfoCheck && !lnOpts.IsTLS() {
- return lnOpts.Listener, nil
- }
- return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener)
-}
-
-func newKeepAliveListener(cfg *net.ListenConfig, addr string) (ln net.Listener, err error) {
- if cfg != nil {
- ln, err = cfg.Listen(context.TODO(), "tcp", addr)
- } else {
- ln, err = net.Listen("tcp", addr)
- }
- if err != nil {
- return
- }
-
- return NewKeepAliveListener(ln, "tcp", nil)
-}
-
-func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
- if scheme != "https" && scheme != "unixs" {
- return l, nil
- }
- if tlsinfo != nil && tlsinfo.SkipClientSANVerify {
- return NewTLSListener(l, tlsinfo)
- }
- return newTLSListener(l, tlsinfo, checkSAN)
-}
-
-func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
- lc := net.ListenConfig{}
- if sopts != nil {
- ctls := getControls(sopts)
- if len(ctls) > 0 {
- lc.Control = ctls.Control
- }
- }
- return lc, nil
-}
-
-type TLSInfo struct {
- // CertFile is the _server_ cert, it will also be used as a _client_ certificate if ClientCertFile is empty
- CertFile string
- // KeyFile is the key for the CertFile
- KeyFile string
- // ClientCertFile is a _client_ cert for initiating connections when ClientCertAuth is defined. If ClientCertAuth
- // is true but this value is empty, the CertFile will be used instead.
- ClientCertFile string
- // ClientKeyFile is the key for the ClientCertFile
- ClientKeyFile string
-
- TrustedCAFile string
- ClientCertAuth bool
- CRLFile string
- InsecureSkipVerify bool
- SkipClientSANVerify bool
-
- // ServerName ensures the cert matches the given host in case of discovery / virtual hosting
- ServerName string
-
- // HandshakeFailure is optionally called when a connection fails to handshake. The
- // connection will be closed immediately afterwards.
- HandshakeFailure func(*tls.Conn, error)
-
- // CipherSuites is a list of supported cipher suites.
- // If empty, Go auto-populates it by default.
- // Note that cipher suites are prioritized in the given order.
- CipherSuites []uint16
-
- // MinVersion is the minimum TLS version that is acceptable.
- // If not set, the minimum version is TLS 1.2.
- MinVersion uint16
-
- // MaxVersion is the maximum TLS version that is acceptable.
- // If not set, the default used by Go is selected (see tls.Config.MaxVersion).
- MaxVersion uint16
-
- selfCert bool
-
- // parseFunc exists to simplify testing. Typically, parseFunc
- // should be left nil. In that case, tls.X509KeyPair will be used.
- parseFunc func([]byte, []byte) (tls.Certificate, error)
-
- // AllowedCN is a CN which must be provided by a client.
- AllowedCN string
-
- // AllowedHostname is an IP address or hostname that must match the TLS
- // certificate provided by a client.
- AllowedHostname string
-
- // Logger logs TLS errors.
- // If nil, all logs are discarded.
- Logger *zap.Logger
-
- // EmptyCN indicates that the cert must have empty CN.
- // If true, ClientConfig() will return an error for a cert with non empty CN.
- EmptyCN bool
-}
-
-func (info TLSInfo) String() string {
- return fmt.Sprintf("cert = %s, key = %s, client-cert=%s, client-key=%s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.ClientCertFile, info.ClientKeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
-}
-
-func (info TLSInfo) Empty() bool {
- return info.CertFile == "" && info.KeyFile == ""
-}
-
-func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) {
- verify.Assert(lg != nil, "nil log isn't allowed")
- info.Logger = lg
- if selfSignedCertValidity == 0 {
- err = errors.New("selfSignedCertValidity is invalid,it should be greater than 0")
- info.Logger.Warn(
- "cannot generate cert",
- zap.Error(err),
- )
- return
- }
- err = fileutil.TouchDirAll(lg, dirpath)
- if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot create cert directory",
- zap.Error(err),
- )
- }
- return
- }
-
- certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem"))
- if err != nil {
- return
- }
- keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem"))
- if err != nil {
- return
- }
- _, errcert := os.Stat(certPath)
- _, errkey := os.Stat(keyPath)
- if errcert == nil && errkey == nil {
- info.CertFile = certPath
- info.KeyFile = keyPath
- info.ClientCertFile = certPath
- info.ClientKeyFile = keyPath
- info.selfCert = true
- return
- }
-
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot generate random number",
- zap.Error(err),
- )
- }
- return
- }
-
- tmpl := x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{Organization: []string{"etcd"}},
- NotBefore: time.Now(),
- NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)),
-
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
- ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...),
- BasicConstraintsValid: true,
- }
-
- if info.Logger != nil {
- info.Logger.Warn(
- "automatically generate certificates",
- zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter),
- )
- }
-
- for _, host := range hosts {
- h, _, _ := net.SplitHostPort(host)
- if ip := net.ParseIP(h); ip != nil {
- tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
- } else {
- tmpl.DNSNames = append(tmpl.DNSNames, h)
- }
- }
-
- priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
- if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot generate ECDSA key",
- zap.Error(err),
- )
- }
- return
- }
-
- derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
- if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot generate x509 certificate",
- zap.Error(err),
- )
- }
- return
- }
-
- certOut, err := os.Create(certPath)
- if err != nil {
- info.Logger.Warn(
- "cannot create cert file",
- zap.String("path", certPath),
- zap.Error(err),
- )
- return
- }
- pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
- certOut.Close()
- if info.Logger != nil {
- info.Logger.Info("created cert file", zap.String("path", certPath))
- }
-
- b, err := x509.MarshalECPrivateKey(priv)
- if err != nil {
- return
- }
- keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
- if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "cannot create key file",
- zap.String("path", keyPath),
- zap.Error(err),
- )
- }
- return
- }
- pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
- keyOut.Close()
- if info.Logger != nil {
- info.Logger.Info("created key file", zap.String("path", keyPath))
- }
- return SelfCert(lg, dirpath, hosts, selfSignedCertValidity)
-}
-
-// baseConfig is called on initial TLS handshake start.
-//
-// Previously,
-// 1. Server has non-empty (*tls.Config).Certificates on client hello
-// 2. Server calls (*tls.Config).GetCertificate iff:
-// - Server's (*tls.Config).Certificates is not empty, or
-// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
-//
-// When (*tls.Config).Certificates is always populated on initial handshake,
-// client is expected to provide a valid matching SNI to pass the TLS
-// verification, thus trigger server (*tls.Config).GetCertificate to reload
-// TLS assets. However, a cert whose SAN field does not include domain names
-// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus
-// it was never able to trigger TLS reload on initial handshake; first
-// certificate object was being used, never being updated.
-//
-// Now, (*tls.Config).Certificates is created empty on initial TLS client
-// handshake, in order to trigger (*tls.Config).GetCertificate and populate
-// rest of the certificates on every new TLS connection, even when client
-// SNI is empty (e.g. cert only includes IPs).
-func (info TLSInfo) baseConfig() (*tls.Config, error) {
- if info.KeyFile == "" || info.CertFile == "" {
- return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
- }
- if info.Logger == nil {
- info.Logger = zap.NewNop()
- }
-
- _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
- if err != nil {
- return nil, err
- }
-
- // Perform prevalidation of client cert and key if either are provided. This makes sure we crash before accepting any connections.
- if (info.ClientKeyFile == "") != (info.ClientCertFile == "") {
- return nil, fmt.Errorf("ClientKeyFile and ClientCertFile must both be present or both absent: key: %v, cert: %v]", info.ClientKeyFile, info.ClientCertFile)
- }
- if info.ClientCertFile != "" {
- _, err := tlsutil.NewCert(info.ClientCertFile, info.ClientKeyFile, info.parseFunc)
- if err != nil {
- return nil, err
- }
- }
-
- var minVersion uint16
- if info.MinVersion != 0 {
- minVersion = info.MinVersion
- } else {
- // Default minimum version is TLS 1.2, previous versions are insecure and deprecated.
- minVersion = tls.VersionTLS12
- }
-
- cfg := &tls.Config{
- MinVersion: minVersion,
- MaxVersion: info.MaxVersion,
- ServerName: info.ServerName,
- }
-
- if len(info.CipherSuites) > 0 {
- cfg.CipherSuites = info.CipherSuites
- }
-
- // Client certificates may be verified by either an exact match on the CN,
- // or a more general check of the CN and SANs.
- var verifyCertificate func(*x509.Certificate) bool
- if info.AllowedCN != "" {
- if info.AllowedHostname != "" {
- return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
- }
- verifyCertificate = func(cert *x509.Certificate) bool {
- return info.AllowedCN == cert.Subject.CommonName
- }
- }
- if info.AllowedHostname != "" {
- verifyCertificate = func(cert *x509.Certificate) bool {
- return cert.VerifyHostname(info.AllowedHostname) == nil
- }
- }
- if verifyCertificate != nil {
- cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- for _, chains := range verifiedChains {
- if len(chains) != 0 {
- if verifyCertificate(chains[0]) {
- return nil
- }
- }
- }
- return errors.New("client certificate authentication failed")
- }
- }
-
- // this only reloads certs when there's a client request
- // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
- cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
- cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
- if os.IsNotExist(err) {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to find peer cert files",
- zap.String("cert-file", info.CertFile),
- zap.String("key-file", info.KeyFile),
- zap.Error(err),
- )
- }
- } else if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to create peer certificate",
- zap.String("cert-file", info.CertFile),
- zap.String("key-file", info.KeyFile),
- zap.Error(err),
- )
- }
- }
- return cert, err
- }
- cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) {
- certfile, keyfile := info.CertFile, info.KeyFile
- if info.ClientCertFile != "" {
- certfile, keyfile = info.ClientCertFile, info.ClientKeyFile
- }
- cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc)
- if os.IsNotExist(err) {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to find client cert files",
- zap.String("cert-file", certfile),
- zap.String("key-file", keyfile),
- zap.Error(err),
- )
- }
- } else if err != nil {
- if info.Logger != nil {
- info.Logger.Warn(
- "failed to create client certificate",
- zap.String("cert-file", certfile),
- zap.String("key-file", keyfile),
- zap.Error(err),
- )
- }
- }
- return cert, err
- }
- return cfg, nil
-}
-
-// cafiles returns a list of CA file paths.
-func (info TLSInfo) cafiles() []string {
- cs := make([]string, 0)
- if info.TrustedCAFile != "" {
- cs = append(cs, info.TrustedCAFile)
- }
- return cs
-}
-
-// ServerConfig generates a tls.Config object for use by an HTTP server.
-func (info TLSInfo) ServerConfig() (*tls.Config, error) {
- cfg, err := info.baseConfig()
- if err != nil {
- return nil, err
- }
-
- if info.Logger == nil {
- info.Logger = zap.NewNop()
- }
-
- cfg.ClientAuth = tls.NoClientCert
- if info.TrustedCAFile != "" || info.ClientCertAuth {
- cfg.ClientAuth = tls.RequireAndVerifyClientCert
- }
-
- cs := info.cafiles()
- if len(cs) > 0 {
- info.Logger.Info("Loading cert pool", zap.Strings("cs", cs),
- zap.Any("tlsinfo", info))
- cp, err := tlsutil.NewCertPool(cs)
- if err != nil {
- return nil, err
- }
- cfg.ClientCAs = cp
- }
-
- // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
- cfg.NextProtos = []string{"h2"}
-
- return cfg, nil
-}
-
-// ClientConfig generates a tls.Config object for use by an HTTP client.
-func (info TLSInfo) ClientConfig() (*tls.Config, error) {
- var cfg *tls.Config
- var err error
-
- if !info.Empty() {
- cfg, err = info.baseConfig()
- if err != nil {
- return nil, err
- }
- } else {
- cfg = &tls.Config{ServerName: info.ServerName}
- }
- cfg.InsecureSkipVerify = info.InsecureSkipVerify
-
- cs := info.cafiles()
- if len(cs) > 0 {
- cfg.RootCAs, err = tlsutil.NewCertPool(cs)
- if err != nil {
- return nil, err
- }
- }
-
- if info.selfCert {
- cfg.InsecureSkipVerify = true
- }
-
- if info.EmptyCN {
- hasNonEmptyCN := false
- cn := ""
- _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) {
- var block *pem.Block
- block, _ = pem.Decode(certPEMBlock)
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return tls.Certificate{}, err
- }
- if len(cert.Subject.CommonName) != 0 {
- hasNonEmptyCN = true
- cn = cert.Subject.CommonName
- }
- return tls.X509KeyPair(certPEMBlock, keyPEMBlock)
- })
- if err != nil {
- return nil, err
- }
- if hasNonEmptyCN {
- return nil, fmt.Errorf("cert has non empty Common Name (%s): %s", cn, info.CertFile)
- }
- }
-
- return cfg, nil
-}
-
-// IsClosedConnError returns true if the error is from a closed listener or cmux.
-// copied from golang.org/x/net/http2/http2.go
-func IsClosedConnError(err error) bool {
- // 'use of closed network connection' (Go <=1.8)
- // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
- // 'mux: listener closed' (cmux.ErrListenerClosed)
- return err != nil && strings.Contains(err.Error(), "closed")
-}
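
The removed TLSInfo.baseConfig leaves tls.Config.Certificates empty and supplies the key pair through GetCertificate, so certificates are re-read from disk on every new handshake, with TLS 1.2 as the default minimum and "h2" advertised for HTTP/2. A minimal sketch of that reload-on-handshake pattern with the standard library (cert.pem and key.pem are placeholder paths):

    package main

    import (
        "crypto/tls"
        "log"
        "net/http"
    )

    func main() {
        const certFile, keyFile = "cert.pem", "key.pem" // assumed to exist on disk

        cfg := &tls.Config{
            MinVersion: tls.VersionTLS12, // same default minimum as the removed code
            // Certificates stays empty; GetCertificate reloads the key pair
            // on every new TLS connection, so rotated files are picked up.
            GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
                cert, err := tls.LoadX509KeyPair(certFile, keyFile)
                if err != nil {
                    return nil, err
                }
                return &cert, nil
            },
            NextProtos: []string{"h2"}, // needed to enable HTTP/2 on Go's HTTP server
        }

        srv := &http.Server{Addr: "127.0.0.1:8443", TLSConfig: cfg}
        // Cert and key come from GetCertificate, so the file arguments are empty.
        log.Fatal(srv.ListenAndServeTLS("", ""))
    }
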
diff --git a/client/pkg/transport/listener_opts.go b/client/pkg/transport/listener_opts.go
deleted file mode 100644
index 7536f6aff46..00000000000
--- a/client/pkg/transport/listener_opts.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "time"
-)
-
-type ListenerOptions struct {
- Listener net.Listener
- ListenConfig net.ListenConfig
-
- socketOpts *SocketOpts
- tlsInfo *TLSInfo
- skipTLSInfoCheck bool
- writeTimeout time.Duration
- readTimeout time.Duration
-}
-
-func newListenOpts(opts ...ListenerOption) *ListenerOptions {
- lnOpts := &ListenerOptions{}
- lnOpts.applyOpts(opts)
- return lnOpts
-}
-
-func (lo *ListenerOptions) applyOpts(opts []ListenerOption) {
- for _, opt := range opts {
- opt(lo)
- }
-}
-
-// IsTimeout returns true if the listener has a read/write timeout defined.
-func (lo *ListenerOptions) IsTimeout() bool { return lo.readTimeout != 0 || lo.writeTimeout != 0 }
-
-// IsSocketOpts returns true if the listener options includes socket options.
-func (lo *ListenerOptions) IsSocketOpts() bool {
- if lo.socketOpts == nil {
- return false
- }
- return lo.socketOpts.ReusePort || lo.socketOpts.ReuseAddress
-}
-
-// IsTLS returns true if the listener options include TLSInfo.
-func (lo *ListenerOptions) IsTLS() bool {
- if lo.tlsInfo == nil {
- return false
- }
- return !lo.tlsInfo.Empty()
-}
-
-// ListenerOption are options which can be applied to the listener.
-type ListenerOption func(*ListenerOptions)
-
-// WithTimeout allows for a read or write timeout to be applied to the listener.
-func WithTimeout(read, write time.Duration) ListenerOption {
- return func(lo *ListenerOptions) {
- lo.writeTimeout = write
- lo.readTimeout = read
- }
-}
-
-// WithSocketOpts defines socket options that will be applied to the listener.
-func WithSocketOpts(s *SocketOpts) ListenerOption {
- return func(lo *ListenerOptions) { lo.socketOpts = s }
-}
-
-// WithTLSInfo adds TLS credentials to the listener.
-func WithTLSInfo(t *TLSInfo) ListenerOption {
- return func(lo *ListenerOptions) { lo.tlsInfo = t }
-}
-
-// WithSkipTLSInfoCheck, when true, allows a listener to be created with an https scheme
-// without passing TLSInfo, circumventing the not presented error. Skipping this check
-// also requires that TLSInfo is not passed.
-func WithSkipTLSInfoCheck(skip bool) ListenerOption {
- return func(lo *ListenerOptions) { lo.skipTLSInfoCheck = skip }
-}
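
The removed listener_opts.go is a straightforward functional-options setup: each ListenerOption mutates a ListenerOptions struct, and predicates such as IsTimeout drive the branching in newListener. A generic sketch of the same pattern, independent of the deleted types (all lowercase names are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    // listenerOptions collects settings; options mutate it in place.
    type listenerOptions struct {
        readTimeout  time.Duration
        writeTimeout time.Duration
        reusePort    bool
    }

    type listenerOption func(*listenerOptions)

    func withTimeout(read, write time.Duration) listenerOption {
        return func(lo *listenerOptions) {
            lo.readTimeout = read
            lo.writeTimeout = write
        }
    }

    func withReusePort() listenerOption {
        return func(lo *listenerOptions) { lo.reusePort = true }
    }

    // isTimeout is the predicate a constructor would use to decide
    // whether to wrap the listener with read/write deadlines.
    func (lo *listenerOptions) isTimeout() bool {
        return lo.readTimeout != 0 || lo.writeTimeout != 0
    }

    func main() {
        opts := &listenerOptions{}
        for _, apply := range []listenerOption{
            withTimeout(5*time.Second, 5*time.Second),
            withReusePort(),
        } {
            apply(opts)
        }
        fmt.Println("timeout configured:", opts.isTimeout(), "reuse port:", opts.reusePort)
    }
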
diff --git a/client/pkg/transport/listener_test.go b/client/pkg/transport/listener_test.go
deleted file mode 100644
index 13277bcd0e0..00000000000
--- a/client/pkg/transport/listener_test.go
+++ /dev/null
@@ -1,575 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "crypto/tls"
- "crypto/x509"
- "errors"
- "net"
- "net/http"
- "os"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "go.uber.org/zap/zaptest"
-)
-
-func createSelfCert(t *testing.T, hosts ...string) (*TLSInfo, error) {
- return createSelfCertEx(t, "127.0.0.1")
-}
-
-func createSelfCertEx(t *testing.T, host string, additionalUsages ...x509.ExtKeyUsage) (*TLSInfo, error) {
- d := t.TempDir()
- info, err := SelfCert(zaptest.NewLogger(t), d, []string{host + ":0"}, 1, additionalUsages...)
- if err != nil {
- return nil, err
- }
- return &info, nil
-}
-
-func fakeCertificateParserFunc(cert tls.Certificate, err error) func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
- return func(certPEMBlock, keyPEMBlock []byte) (tls.Certificate, error) {
- return cert, err
- }
-}
-
-// TestNewListenerTLSInfo tests that NewListener with valid TLSInfo returns
-// a TLS listener that accepts TLS connections.
-func TestNewListenerTLSInfo(t *testing.T) {
- tlsInfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
- testNewListenerTLSInfoAccept(t, *tlsInfo)
-}
-
-func TestNewListenerWithOpts(t *testing.T) {
- tlsInfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- tests := map[string]struct {
- opts []ListenerOption
- scheme string
- expectedErr bool
- }{
- "https scheme no TLSInfo": {
- opts: []ListenerOption{},
- expectedErr: true,
- scheme: "https",
- },
- "https scheme no TLSInfo with skip check": {
- opts: []ListenerOption{WithSkipTLSInfoCheck(true)},
- expectedErr: false,
- scheme: "https",
- },
- "https scheme empty TLSInfo with skip check": {
- opts: []ListenerOption{
- WithSkipTLSInfoCheck(true),
- WithTLSInfo(&TLSInfo{}),
- },
- expectedErr: false,
- scheme: "https",
- },
- "https scheme empty TLSInfo no skip check": {
- opts: []ListenerOption{
- WithTLSInfo(&TLSInfo{}),
- },
- expectedErr: true,
- scheme: "https",
- },
- "https scheme with TLSInfo and skip check": {
- opts: []ListenerOption{
- WithSkipTLSInfoCheck(true),
- WithTLSInfo(tlsInfo),
- },
- expectedErr: false,
- scheme: "https",
- },
- }
- for testName, test := range tests {
- t.Run(testName, func(t *testing.T) {
- ln, err := NewListenerWithOpts("127.0.0.1:0", test.scheme, test.opts...)
- if ln != nil {
- defer ln.Close()
- }
- if test.expectedErr && err == nil {
- t.Fatalf("expected error")
- }
- if !test.expectedErr && err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- })
- }
-}
-
-func TestNewListenerWithSocketOpts(t *testing.T) {
- tlsInfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- tests := map[string]struct {
- opts []ListenerOption
- scheme string
- expectedErr bool
- }{
- "nil socketopts": {
- opts: []ListenerOption{WithSocketOpts(nil)},
- expectedErr: true,
- scheme: "http",
- },
- "empty socketopts": {
- opts: []ListenerOption{WithSocketOpts(&SocketOpts{})},
- expectedErr: true,
- scheme: "http",
- },
-
- "reuse address": {
- opts: []ListenerOption{WithSocketOpts(&SocketOpts{ReuseAddress: true})},
- scheme: "http",
- expectedErr: true,
- },
- "reuse address with TLS": {
- opts: []ListenerOption{
- WithSocketOpts(&SocketOpts{ReuseAddress: true}),
- WithTLSInfo(tlsInfo),
- },
- scheme: "https",
- expectedErr: true,
- },
- "reuse address and port": {
- opts: []ListenerOption{WithSocketOpts(&SocketOpts{ReuseAddress: true, ReusePort: true})},
- scheme: "http",
- expectedErr: false,
- },
- "reuse address and port with TLS": {
- opts: []ListenerOption{
- WithSocketOpts(&SocketOpts{ReuseAddress: true, ReusePort: true}),
- WithTLSInfo(tlsInfo),
- },
- scheme: "https",
- expectedErr: false,
- },
- "reuse port with TLS and timeout": {
- opts: []ListenerOption{
- WithSocketOpts(&SocketOpts{ReusePort: true}),
- WithTLSInfo(tlsInfo),
- WithTimeout(5*time.Second, 5*time.Second),
- },
- scheme: "https",
- expectedErr: false,
- },
- "reuse port with https scheme and no TLSInfo skip check": {
- opts: []ListenerOption{
- WithSocketOpts(&SocketOpts{ReusePort: true}),
- WithSkipTLSInfoCheck(true),
- },
- scheme: "https",
- expectedErr: false,
- },
- "reuse port": {
- opts: []ListenerOption{WithSocketOpts(&SocketOpts{ReusePort: true})},
- scheme: "http",
- expectedErr: false,
- },
- }
- for testName, test := range tests {
- t.Run(testName, func(t *testing.T) {
- ln, err := NewListenerWithOpts("127.0.0.1:0", test.scheme, test.opts...)
- if err != nil {
- t.Fatalf("unexpected NewListenerWithSocketOpts error: %v", err)
- }
- defer ln.Close()
- ln2, err := NewListenerWithOpts(ln.Addr().String(), test.scheme, test.opts...)
- if ln2 != nil {
- ln2.Close()
- }
- if test.expectedErr && err == nil {
- t.Fatalf("expected error")
- }
- if !test.expectedErr && err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
-
- if test.scheme == "http" {
- lnOpts := newListenOpts(test.opts...)
- if !lnOpts.IsSocketOpts() && !lnOpts.IsTimeout() {
- if _, ok := ln.(*keepaliveListener); !ok {
- t.Fatalf("ln: unexpected listener type: %T, wanted *keepaliveListener", ln)
- }
- }
- }
- })
- }
-}
-
-func testNewListenerTLSInfoAccept(t *testing.T, tlsInfo TLSInfo) {
- ln, err := NewListener("127.0.0.1:0", "https", &tlsInfo)
- if err != nil {
- t.Fatalf("unexpected NewListener error: %v", err)
- }
- defer ln.Close()
-
- tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
- cli := &http.Client{Transport: tr}
- go cli.Get("https://" + ln.Addr().String())
-
- conn, err := ln.Accept()
- if err != nil {
- t.Fatalf("unexpected Accept error: %v", err)
- }
- defer conn.Close()
- if _, ok := conn.(*tls.Conn); !ok {
- t.Error("failed to accept *tls.Conn")
- }
-}
-
-// TestNewListenerTLSInfoSkipClientSANVerify tests that the connection is still accepted
-// when the client IP address does not match the address in its certificate,
-// provided the SkipClientSANVerify flag is set (i.e. checkSAN() is disabled for the client side).
-func TestNewListenerTLSInfoSkipClientSANVerify(t *testing.T) {
- tests := []struct {
- skipClientSANVerify bool
- goodClientHost bool
- acceptExpected bool
- }{
- {false, true, true},
- {false, false, false},
- {true, true, true},
- {true, false, true},
- }
- for _, test := range tests {
- testNewListenerTLSInfoClientCheck(t, test.skipClientSANVerify, test.goodClientHost, test.acceptExpected)
- }
-}
-
-func testNewListenerTLSInfoClientCheck(t *testing.T, skipClientSANVerify, goodClientHost, acceptExpected bool) {
- tlsInfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- host := "127.0.0.222"
- if goodClientHost {
- host = "127.0.0.1"
- }
- clientTLSInfo, err := createSelfCertEx(t, host, x509.ExtKeyUsageClientAuth)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- tlsInfo.SkipClientSANVerify = skipClientSANVerify
- tlsInfo.TrustedCAFile = clientTLSInfo.CertFile
-
- rootCAs := x509.NewCertPool()
- loaded, err := os.ReadFile(tlsInfo.CertFile)
- if err != nil {
- t.Fatalf("unexpected missing certfile: %v", err)
- }
- rootCAs.AppendCertsFromPEM(loaded)
-
- clientCert, err := tls.LoadX509KeyPair(clientTLSInfo.CertFile, clientTLSInfo.KeyFile)
- if err != nil {
- t.Fatalf("unable to create peer cert: %v", err)
- }
-
- tlsConfig := &tls.Config{}
- tlsConfig.InsecureSkipVerify = false
- tlsConfig.Certificates = []tls.Certificate{clientCert}
- tlsConfig.RootCAs = rootCAs
-
- ln, err := NewListener("127.0.0.1:0", "https", tlsInfo)
- if err != nil {
- t.Fatalf("unexpected NewListener error: %v", err)
- }
- defer ln.Close()
-
- tr := &http.Transport{TLSClientConfig: tlsConfig}
- cli := &http.Client{Transport: tr}
- chClientErr := make(chan error, 1)
- go func() {
- _, err := cli.Get("https://" + ln.Addr().String())
- chClientErr <- err
- }()
-
- chAcceptErr := make(chan error, 1)
- chAcceptConn := make(chan net.Conn, 1)
- go func() {
- conn, err := ln.Accept()
- if err != nil {
- chAcceptErr <- err
- } else {
- chAcceptConn <- conn
- }
- }()
-
- select {
- case <-chClientErr:
- if acceptExpected {
- t.Errorf("connection rejected for good client address: skipClientSANVerify=%t, goodClientHost=%t", skipClientSANVerify, goodClientHost)
- }
- case acceptErr := <-chAcceptErr:
- t.Fatalf("unexpected Accept error: %v", acceptErr)
- case conn := <-chAcceptConn:
- defer conn.Close()
- if _, ok := conn.(*tls.Conn); !ok {
- t.Errorf("failed to accept *tls.Conn")
- }
- if !acceptExpected {
- t.Errorf("accepted for bad client address: skipClientSANVerify=%t, goodClientHost=%t", skipClientSANVerify, goodClientHost)
- }
- }
-}
-
-func TestNewListenerTLSEmptyInfo(t *testing.T) {
- _, err := NewListener("127.0.0.1:0", "https", nil)
- if err == nil {
- t.Errorf("err = nil, want non-nil error")
- }
-}
-
-func TestNewTransportTLSInfo(t *testing.T) {
- tlsinfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- tests := []TLSInfo{
- {},
- {
- CertFile: tlsinfo.CertFile,
- KeyFile: tlsinfo.KeyFile,
- },
- {
- CertFile: tlsinfo.CertFile,
- KeyFile: tlsinfo.KeyFile,
- TrustedCAFile: tlsinfo.TrustedCAFile,
- },
- {
- TrustedCAFile: tlsinfo.TrustedCAFile,
- },
- }
-
- for i, tt := range tests {
- tt.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
- trans, err := NewTransport(tt, time.Second)
- if err != nil {
- t.Fatalf("Received unexpected error from NewTransport: %v", err)
- }
-
- if trans.TLSClientConfig == nil {
- t.Fatalf("#%d: want non-nil TLSClientConfig", i)
- }
- }
-}
-
-func TestTLSInfoNonexist(t *testing.T) {
- tlsInfo := TLSInfo{CertFile: "@badname", KeyFile: "@badname"}
- _, err := tlsInfo.ServerConfig()
- werr := &os.PathError{
- Op: "open",
- Path: "@badname",
- Err: errors.New("no such file or directory"),
- }
- if err.Error() != werr.Error() {
- t.Errorf("err = %v, want %v", err, werr)
- }
-}
-
-func TestTLSInfoEmpty(t *testing.T) {
- tests := []struct {
- info TLSInfo
- want bool
- }{
- {TLSInfo{}, true},
- {TLSInfo{TrustedCAFile: "baz"}, true},
- {TLSInfo{CertFile: "foo"}, false},
- {TLSInfo{KeyFile: "bar"}, false},
- {TLSInfo{CertFile: "foo", KeyFile: "bar"}, false},
- {TLSInfo{CertFile: "foo", TrustedCAFile: "baz"}, false},
- {TLSInfo{KeyFile: "bar", TrustedCAFile: "baz"}, false},
- {TLSInfo{CertFile: "foo", KeyFile: "bar", TrustedCAFile: "baz"}, false},
- }
-
- for i, tt := range tests {
- got := tt.info.Empty()
- if tt.want != got {
- t.Errorf("#%d: result of Empty() incorrect: want=%t got=%t", i, tt.want, got)
- }
- }
-}
-
-func TestTLSInfoMissingFields(t *testing.T) {
- tlsinfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- tests := []TLSInfo{
- {CertFile: tlsinfo.CertFile},
- {KeyFile: tlsinfo.KeyFile},
- {CertFile: tlsinfo.CertFile, TrustedCAFile: tlsinfo.TrustedCAFile},
- {KeyFile: tlsinfo.KeyFile, TrustedCAFile: tlsinfo.TrustedCAFile},
- }
-
- for i, info := range tests {
- if _, err = info.ServerConfig(); err == nil {
- t.Errorf("#%d: expected non-nil error from ServerConfig()", i)
- }
-
- if _, err = info.ClientConfig(); err == nil {
- t.Errorf("#%d: expected non-nil error from ClientConfig()", i)
- }
- }
-}
-
-func TestTLSInfoParseFuncError(t *testing.T) {
- tlsinfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- tests := []struct {
- info TLSInfo
- }{
- {
- info: *tlsinfo,
- },
-
- {
- info: TLSInfo{CertFile: "", KeyFile: "", TrustedCAFile: tlsinfo.CertFile, EmptyCN: true},
- },
- }
-
- for i, tt := range tests {
- tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, errors.New("fake"))
-
- if _, err = tt.info.ServerConfig(); err == nil {
- t.Errorf("#%d: expected non-nil error from ServerConfig()", i)
- }
-
- if _, err = tt.info.ClientConfig(); err == nil {
- t.Errorf("#%d: expected non-nil error from ClientConfig()", i)
- }
- }
-}
-
-func TestTLSInfoConfigFuncs(t *testing.T) {
- ln := zaptest.NewLogger(t)
- tlsinfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- tests := []struct {
- info TLSInfo
- clientAuth tls.ClientAuthType
- wantCAs bool
- }{
- {
- info: TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile, Logger: ln},
- clientAuth: tls.NoClientCert,
- wantCAs: false,
- },
-
- {
- info: TLSInfo{CertFile: tlsinfo.CertFile, KeyFile: tlsinfo.KeyFile, TrustedCAFile: tlsinfo.CertFile, Logger: ln},
- clientAuth: tls.RequireAndVerifyClientCert,
- wantCAs: true,
- },
- }
-
- for i, tt := range tests {
- tt.info.parseFunc = fakeCertificateParserFunc(tls.Certificate{}, nil)
-
- sCfg, err := tt.info.ServerConfig()
- if err != nil {
- t.Errorf("#%d: expected nil error from ServerConfig(), got non-nil: %v", i, err)
- }
-
- if tt.wantCAs != (sCfg.ClientCAs != nil) {
- t.Errorf("#%d: wantCAs=%t but ClientCAs=%v", i, tt.wantCAs, sCfg.ClientCAs)
- }
-
- cCfg, err := tt.info.ClientConfig()
- if err != nil {
- t.Errorf("#%d: expected nil error from ClientConfig(), got non-nil: %v", i, err)
- }
-
- if tt.wantCAs != (cCfg.RootCAs != nil) {
- t.Errorf("#%d: wantCAs=%t but RootCAs=%v", i, tt.wantCAs, cCfg.RootCAs)
- }
- }
-}
-
-func TestNewListenerUnixSocket(t *testing.T) {
- l, err := NewListener("testsocket", "unix", nil)
- if err != nil {
- t.Errorf("error listening on unix socket (%v)", err)
- }
- l.Close()
-}
-
-// TestNewListenerTLSInfoSelfCert tests that a listener created with a freshly
-// generated self-signed certificate accepts connections.
-func TestNewListenerTLSInfoSelfCert(t *testing.T) {
- tmpdir := t.TempDir()
-
- tlsinfo, err := SelfCert(zaptest.NewLogger(t), tmpdir, []string{"127.0.0.1"}, 1)
- if err != nil {
- t.Fatal(err)
- }
- if tlsinfo.Empty() {
- t.Fatalf("tlsinfo should have certs (%+v)", tlsinfo)
- }
- testNewListenerTLSInfoAccept(t, tlsinfo)
-
- assert.Panics(t, func() {
- SelfCert(nil, tmpdir, []string{"127.0.0.1"}, 1)
- }, "expected panic with nil log")
-}
-
-func TestIsClosedConnError(t *testing.T) {
- l, err := NewListener("testsocket", "unix", nil)
- if err != nil {
- t.Errorf("error listening on unix socket (%v)", err)
- }
- l.Close()
- _, err = l.Accept()
- if !IsClosedConnError(err) {
- t.Fatalf("expect true, got false (%v)", err)
- }
-}
-
-func TestSocktOptsEmpty(t *testing.T) {
- tests := []struct {
- sopts SocketOpts
- want bool
- }{
- {SocketOpts{}, true},
- {SocketOpts{ReuseAddress: true, ReusePort: false}, false},
- {SocketOpts{ReusePort: true}, false},
- }
-
- for i, tt := range tests {
- got := tt.sopts.Empty()
- if tt.want != got {
- t.Errorf("#%d: result of Empty() incorrect: want=%t got=%t", i, tt.want, got)
- }
- }
-}
diff --git a/client/pkg/transport/sockopt.go b/client/pkg/transport/sockopt.go
deleted file mode 100644
index 49b48dc8767..00000000000
--- a/client/pkg/transport/sockopt.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "syscall"
-)
-
-type Controls []func(network, addr string, conn syscall.RawConn) error
-
-func (ctls Controls) Control(network, addr string, conn syscall.RawConn) error {
- for _, s := range ctls {
- if err := s(network, addr, conn); err != nil {
- return err
- }
- }
- return nil
-}
-
-type SocketOpts struct {
- // ReusePort enables the socket option SO_REUSEPORT [1], which allows rebinding
- // to a port that is already in use. Keep in mind that flock can fail, in which
- // case a lock on the data file could lead to an unexpected condition; take care
- // to protect against lock races.
- // [1] https://man7.org/linux/man-pages/man7/socket.7.html
- ReusePort bool `json:"reuse-port"`
- // ReuseAddress enables the socket option SO_REUSEADDR [1], which allows
- // binding to an address in `TIME_WAIT` state. This helps improve MTTR
- // when etcd is slow to restart due to excessive `TIME_WAIT` sockets.
- // [1] https://man7.org/linux/man-pages/man7/socket.7.html
- ReuseAddress bool `json:"reuse-address"`
-}
-
-func getControls(sopts *SocketOpts) Controls {
- ctls := Controls{}
- if sopts.ReuseAddress {
- ctls = append(ctls, setReuseAddress)
- }
- if sopts.ReusePort {
- ctls = append(ctls, setReusePort)
- }
- return ctls
-}
-
-func (sopts *SocketOpts) Empty() bool {
- return !sopts.ReuseAddress && !sopts.ReusePort
-}
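
The exported Controls type deleted above is essentially a chain of net.ListenConfig control functions. Below is a rough, unix-only sketch of that pattern; the SO_REUSEADDR control mirrors the setReuseAddress helper in the platform files further down and is illustrative rather than a copy of the package internals.

```go
package main

import (
	"context"
	"log"
	"net"
	"syscall"

	"golang.org/x/sys/unix"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	// A single control function, analogous to the deleted setReuseAddress helper.
	setReuseAddress := func(network, address string, conn syscall.RawConn) error {
		return conn.Control(func(fd uintptr) {
			syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
		})
	}

	// Controls chains such functions and exposes a single Control method
	// that net.ListenConfig invokes for every new socket.
	ctls := transport.Controls{setReuseAddress}
	lc := net.ListenConfig{Control: ctls.Control}

	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	log.Println("bound with SO_REUSEADDR on", ln.Addr())
}
```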
diff --git a/client/pkg/transport/sockopt_solaris.go b/client/pkg/transport/sockopt_solaris.go
deleted file mode 100644
index 149ad510240..00000000000
--- a/client/pkg/transport/sockopt_solaris.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build solaris
-
-package transport
-
-import (
- "errors"
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-func setReusePort(network, address string, c syscall.RawConn) error {
- return errors.New("port reuse is not supported on Solaris")
-}
-
-func setReuseAddress(network, address string, conn syscall.RawConn) error {
- return conn.Control(func(fd uintptr) {
- syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
- })
-}
diff --git a/client/pkg/transport/sockopt_unix.go b/client/pkg/transport/sockopt_unix.go
deleted file mode 100644
index 4e76bf95be1..00000000000
--- a/client/pkg/transport/sockopt_unix.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows && !solaris
-
-package transport
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-func setReusePort(network, address string, conn syscall.RawConn) error {
- return conn.Control(func(fd uintptr) {
- syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1)
- })
-}
-
-func setReuseAddress(network, address string, conn syscall.RawConn) error {
- return conn.Control(func(fd uintptr) {
- syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
- })
-}
diff --git a/client/pkg/transport/sockopt_windows.go b/client/pkg/transport/sockopt_windows.go
deleted file mode 100644
index 2670b4dc7b5..00000000000
--- a/client/pkg/transport/sockopt_windows.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package transport
-
-import (
- "errors"
- "syscall"
-)
-
-func setReusePort(network, address string, c syscall.RawConn) error {
- return errors.New("port reuse is not supported on Windows")
-}
-
-// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as
-// there is no protection against port hijacking.
-func setReuseAddress(network, addr string, conn syscall.RawConn) error {
- return errors.New("address reuse is not supported on Windows")
-}
diff --git a/client/pkg/transport/timeout_dialer_test.go b/client/pkg/transport/timeout_dialer_test.go
deleted file mode 100644
index 854d68d1472..00000000000
--- a/client/pkg/transport/timeout_dialer_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "testing"
- "time"
-)
-
-func TestReadWriteTimeoutDialer(t *testing.T) {
- stop := make(chan struct{})
-
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
- defer func() {
- stop <- struct{}{}
- }()
- ts := testBlockingServer{ln, 2, stop}
- go ts.Start(t)
-
- d := rwTimeoutDialer{
- wtimeoutd: 10 * time.Millisecond,
- rdtimeoutd: 10 * time.Millisecond,
- }
- conn, err := d.Dial("tcp", ln.Addr().String())
- if err != nil {
- t.Fatalf("unexpected dial error: %v", err)
- }
- defer conn.Close()
-
- // fill the socket buffer
- data := make([]byte, 5*1024*1024)
- done := make(chan struct{}, 1)
- go func() {
- _, err = conn.Write(data)
- done <- struct{}{}
- }()
-
- select {
- case <-done:
- // Wait 5s more than timeout to avoid delay in low-end systems;
- // the slack was 1s extra, but that wasn't enough for CI.
- case <-time.After(d.wtimeoutd*10 + 5*time.Second):
- t.Fatal("wait timeout")
- }
-
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "write" || !operr.Timeout() {
- t.Errorf("err = %v, want write i/o timeout error", err)
- }
-
- conn, err = d.Dial("tcp", ln.Addr().String())
- if err != nil {
- t.Fatalf("unexpected dial error: %v", err)
- }
- defer conn.Close()
-
- buf := make([]byte, 10)
- go func() {
- _, err = conn.Read(buf)
- done <- struct{}{}
- }()
-
- select {
- case <-done:
- case <-time.After(d.rdtimeoutd * 10):
- t.Fatal("wait timeout")
- }
-
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "read" || !operr.Timeout() {
- t.Errorf("err = %v, want read i/o timeout error", err)
- }
-}
-
-type testBlockingServer struct {
- ln net.Listener
- n int
- stop chan struct{}
-}
-
-func (ts *testBlockingServer) Start(t *testing.T) {
- for i := 0; i < ts.n; i++ {
- conn, err := ts.ln.Accept()
- if err != nil {
- t.Error(err)
- }
- defer conn.Close()
- }
- <-ts.stop
-}
diff --git a/client/pkg/transport/timeout_listener_test.go b/client/pkg/transport/timeout_listener_test.go
deleted file mode 100644
index 828ddf8620f..00000000000
--- a/client/pkg/transport/timeout_listener_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "testing"
- "time"
-)
-
-// TestNewTimeoutListener tests that NewTimeoutListener returns a
-// rwTimeoutListener struct with timeouts set.
-func TestNewTimeoutListener(t *testing.T) {
- l, err := NewTimeoutListener("127.0.0.1:0", "http", nil, time.Hour, time.Hour)
- if err != nil {
- t.Fatalf("unexpected NewTimeoutListener error: %v", err)
- }
- defer l.Close()
- tln := l.(*rwTimeoutListener)
- if tln.readTimeout != time.Hour {
- t.Errorf("read timeout = %s, want %s", tln.readTimeout, time.Hour)
- }
- if tln.writeTimeout != time.Hour {
- t.Errorf("write timeout = %s, want %s", tln.writeTimeout, time.Hour)
- }
-}
-
-func TestWriteReadTimeoutListener(t *testing.T) {
- ln, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("unexpected listen error: %v", err)
- }
- wln := rwTimeoutListener{
- Listener: ln,
- writeTimeout: 10 * time.Millisecond,
- readTimeout: 10 * time.Millisecond,
- }
-
- blocker := func(stopCh <-chan struct{}) {
- conn, derr := net.Dial("tcp", ln.Addr().String())
- if derr != nil {
- t.Errorf("unexpected dial error: %v", derr)
- }
- defer conn.Close()
- // block the receiver until the writer timeout
- <-stopCh
- }
-
- writerStopCh := make(chan struct{}, 1)
- go blocker(writerStopCh)
-
- conn, err := wln.Accept()
- if err != nil {
- writerStopCh <- struct{}{}
- t.Fatalf("unexpected accept error: %v", err)
- }
- defer conn.Close()
-
- // fill the socket buffer
- data := make([]byte, 5*1024*1024)
- done := make(chan struct{}, 1)
- go func() {
- _, err = conn.Write(data)
- done <- struct{}{}
- }()
-
- select {
- case <-done:
- // Wait 1s beyond the timeout to allow for delays on low-end systems.
- case <-time.After(wln.writeTimeout*10 + time.Second):
- writerStopCh <- struct{}{}
- t.Fatal("wait timeout")
- }
-
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "write" || !operr.Timeout() {
- t.Errorf("err = %v, want write i/o timeout error", err)
- }
- writerStopCh <- struct{}{}
-
- readerStopCh := make(chan struct{}, 1)
- go blocker(readerStopCh)
-
- conn, err = wln.Accept()
- if err != nil {
- readerStopCh <- struct{}{}
- t.Fatalf("unexpected accept error: %v", err)
- }
- buf := make([]byte, 10)
-
- go func() {
- _, err = conn.Read(buf)
- done <- struct{}{}
- }()
-
- select {
- case <-done:
- case <-time.After(wln.readTimeout * 10):
- readerStopCh <- struct{}{}
- t.Fatal("wait timeout")
- }
-
- if operr, ok := err.(*net.OpError); !ok || operr.Op != "read" || !operr.Timeout() {
- t.Errorf("err = %v, want read i/o timeout error", err)
- }
- readerStopCh <- struct{}{}
-}
diff --git a/client/pkg/transport/timeout_transport.go b/client/pkg/transport/timeout_transport.go
deleted file mode 100644
index ea16b4c0f86..00000000000
--- a/client/pkg/transport/timeout_transport.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "net/http"
- "time"
-)
-
-// NewTimeoutTransport returns a transport created using the given TLS info.
-// If a read/write on the created connection blocks longer than its time limit,
-// it will return a timeout error.
-// If a read/write timeout is set, the transport will not reuse connections.
-func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
- tr, err := NewTransport(info, dialtimeoutd)
- if err != nil {
- return nil, err
- }
-
- if rdtimeoutd != 0 || wtimeoutd != 0 {
- // A timed-out connection will time out again soon after it becomes idle,
- // so it should not be returned to the http transport as an idle connection for future use.
- tr.MaxIdleConnsPerHost = -1
- } else {
- // allow more idle connections between peers to avoid unnecessary port allocation.
- tr.MaxIdleConnsPerHost = 1024
- }
-
- tr.Dial = (&rwTimeoutDialer{
- Dialer: net.Dialer{
- Timeout: dialtimeoutd,
- KeepAlive: 30 * time.Second,
- },
- rdtimeoutd: rdtimeoutd,
- wtimeoutd: wtimeoutd,
- }).Dial
- return tr, nil
-}
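
A small usage sketch for the transport removed above (the endpoint URL is a hypothetical example): with per-operation read/write timeouts set, each request gets a fresh connection, and a blocked read or write fails with a timeout error.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	// 2s dial timeout, 1s read timeout, 1s write timeout; empty TLSInfo means plain HTTP.
	tr, err := transport.NewTimeoutTransport(transport.TLSInfo{}, 2*time.Second, time.Second, time.Second)
	if err != nil {
		log.Fatal(err)
	}
	cli := &http.Client{Transport: tr}

	resp, err := cli.Get("http://127.0.0.1:2379/version") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```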
diff --git a/client/pkg/transport/timeout_transport_test.go b/client/pkg/transport/timeout_transport_test.go
deleted file mode 100644
index 95079f9b598..00000000000
--- a/client/pkg/transport/timeout_transport_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "bytes"
- "io"
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-)
-
-// TestNewTimeoutTransport tests that NewTimeoutTransport returns a transport
-// that dials out connections enforcing the configured timeouts.
-func TestNewTimeoutTransport(t *testing.T) {
- tr, err := NewTimeoutTransport(TLSInfo{}, time.Hour, time.Hour, time.Hour)
- if err != nil {
- t.Fatalf("unexpected NewTimeoutTransport error: %v", err)
- }
-
- remoteAddr := func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(r.RemoteAddr))
- }
- srv := httptest.NewServer(http.HandlerFunc(remoteAddr))
-
- defer srv.Close()
- conn, err := tr.Dial("tcp", srv.Listener.Addr().String())
- if err != nil {
- t.Fatalf("unexpected dial error: %v", err)
- }
- defer conn.Close()
-
- tconn, ok := conn.(*timeoutConn)
- if !ok {
- t.Fatalf("dialed connection is not a *timeoutConn")
- }
- if tconn.readTimeout != time.Hour {
- t.Errorf("read timeout = %s, want %s", tconn.readTimeout, time.Hour)
- }
- if tconn.writeTimeout != time.Hour {
- t.Errorf("write timeout = %s, want %s", tconn.writeTimeout, time.Hour)
- }
-
- // ensure the timed-out connection is not reused
- req, err := http.NewRequest("GET", srv.URL, nil)
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
- resp, err := tr.RoundTrip(req)
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
- addr0, err := io.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
-
- resp, err = tr.RoundTrip(req)
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
- addr1, err := io.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- t.Fatalf("unexpected err %v", err)
- }
-
- if bytes.Equal(addr0, addr1) {
- t.Errorf("addr0 = %s addr1= %s, want not equal", string(addr0), string(addr1))
- }
-}
diff --git a/client/pkg/transport/tls_test.go b/client/pkg/transport/tls_test.go
deleted file mode 100644
index 46af1db6786..00000000000
--- a/client/pkg/transport/tls_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net/http"
- "net/http/httptest"
- "reflect"
- "testing"
-)
-
-func TestValidateSecureEndpoints(t *testing.T) {
- tlsInfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- remoteAddr := func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(r.RemoteAddr))
- }
- srv := httptest.NewServer(http.HandlerFunc(remoteAddr))
- defer srv.Close()
-
- tests := map[string]struct {
- endPoints []string
- expectedEndpoints []string
- expectedErr bool
- }{
- "invalidEndPoints": {
- endPoints: []string{
- "invalid endpoint",
- },
- expectedEndpoints: nil,
- expectedErr: true,
- },
- "insecureEndpoints": {
- endPoints: []string{
- "http://127.0.0.1:8000",
- "http://" + srv.Listener.Addr().String(),
- },
- expectedEndpoints: nil,
- expectedErr: true,
- },
- "secureEndPoints": {
- endPoints: []string{
- "https://" + srv.Listener.Addr().String(),
- },
- expectedEndpoints: []string{
- "https://" + srv.Listener.Addr().String(),
- },
- expectedErr: false,
- },
- "mixEndPoints": {
- endPoints: []string{
- "https://" + srv.Listener.Addr().String(),
- "http://" + srv.Listener.Addr().String(),
- "invalid end points",
- },
- expectedEndpoints: []string{
- "https://" + srv.Listener.Addr().String(),
- },
- expectedErr: true,
- },
- }
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- secureEps, err := ValidateSecureEndpoints(*tlsInfo, test.endPoints)
- if test.expectedErr != (err != nil) {
- t.Errorf("Unexpected error, got: %v, want: %v", err, test.expectedErr)
- }
-
- if !reflect.DeepEqual(test.expectedEndpoints, secureEps) {
- t.Errorf("expected endpoints %v, got %v", test.expectedEndpoints, secureEps)
- }
- })
- }
-}
diff --git a/client/pkg/transport/transport.go b/client/pkg/transport/transport.go
deleted file mode 100644
index 91462dcdb08..00000000000
--- a/client/pkg/transport/transport.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "context"
- "net"
- "net/http"
- "strings"
- "time"
-)
-
-type unixTransport struct{ *http.Transport }
-
-func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
- cfg, err := info.ClientConfig()
- if err != nil {
- return nil, err
- }
-
- t := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: dialtimeoutd,
- // value taken from http.DefaultTransport
- KeepAlive: 30 * time.Second,
- }).DialContext,
- // value taken from http.DefaultTransport
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: cfg,
- }
-
- dialer := &net.Dialer{
- Timeout: dialtimeoutd,
- KeepAlive: 30 * time.Second,
- }
-
- dialContext := func(ctx context.Context, net, addr string) (net.Conn, error) {
- return dialer.DialContext(ctx, "unix", addr)
- }
- tu := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: dialContext,
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: cfg,
- // Cost of reopening connection on sockets is low, and they are mostly used in testing.
- // Long living unix-transport connections were leading to 'leak' test flakes.
- // Alternatively the returned Transport (t) should override CloseIdleConnections to
- // forward it to 'tu' as well.
- IdleConnTimeout: time.Microsecond,
- }
- ut := &unixTransport{tu}
-
- t.RegisterProtocol("unix", ut)
- t.RegisterProtocol("unixs", ut)
-
- return t, nil
-}
-
-func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- url := *req.URL
- req.URL = &url
- req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
- return urt.Transport.RoundTrip(req)
-}
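
For reference, a hedged sketch of how a caller builds an HTTPS client from a TLSInfo via the NewTransport constructor deleted above; the certificate paths are placeholders, not files from this repository.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	tlsInfo := transport.TLSInfo{
		CertFile:      "/etc/etcd/client.crt", // placeholder paths
		KeyFile:       "/etc/etcd/client.key",
		TrustedCAFile: "/etc/etcd/ca.crt",
	}

	// 5s dial timeout; the returned transport also registers the
	// "unix"/"unixs" round trippers shown in the deleted file.
	tr, err := transport.NewTransport(tlsInfo, 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	cli := &http.Client{Transport: tr}
	_ = cli // use cli.Get(...) against an https:// or unixs:// endpoint
}
```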
diff --git a/client/pkg/transport/transport_test.go b/client/pkg/transport/transport_test.go
deleted file mode 100644
index 315f32cf2dc..00000000000
--- a/client/pkg/transport/transport_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "crypto/tls"
- "net/http"
- "strings"
- "testing"
- "time"
-)
-
-// TestNewTransportTLSInvalidCipherSuitesTLS12 expects a client with invalid
-// cipher suites to fail to handshake with the server.
-func TestNewTransportTLSInvalidCipherSuitesTLS12(t *testing.T) {
- tlsInfo, err := createSelfCert(t)
- if err != nil {
- t.Fatalf("unable to create cert: %v", err)
- }
-
- cipherSuites := []uint16{
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
-
- // make server and client have unmatched cipher suites
- srvTLS, cliTLS := *tlsInfo, *tlsInfo
- srvTLS.CipherSuites, cliTLS.CipherSuites = cipherSuites[:2], cipherSuites[2:]
-
- ln, err := NewListener("127.0.0.1:0", "https", &srvTLS)
- if err != nil {
- t.Fatalf("unexpected NewListener error: %v", err)
- }
- defer ln.Close()
-
- donec := make(chan struct{})
- go func() {
- ln.Accept()
- donec <- struct{}{}
- }()
- go func() {
- tr, err := NewTransport(cliTLS, 3*time.Second)
- if err != nil {
- // check the error before dereferencing tr to avoid a nil pointer panic
- t.Errorf("unexpected NewTransport error: %v", err)
- ln.Close()
- donec <- struct{}{}
- return
- }
- tr.TLSClientConfig.MaxVersion = tls.VersionTLS12
- cli := &http.Client{Transport: tr}
- _, gerr := cli.Get("https://" + ln.Addr().String())
- if gerr == nil || !strings.Contains(gerr.Error(), "tls: handshake failure") {
- t.Error("expected client TLS handshake error")
- }
- ln.Close()
- donec <- struct{}{}
- }()
- <-donec
- <-donec
-}
diff --git a/client/pkg/types/id.go b/client/pkg/types/id.go
deleted file mode 100644
index 9a8429391ed..00000000000
--- a/client/pkg/types/id.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "bytes"
- "strconv"
-)
-
-// ID represents a generic identifier which is canonically
-// stored as a uint64 but is typically represented as a
-// base-16 string for input/output
-type ID uint64
-
-func (i ID) String() string {
- return strconv.FormatUint(uint64(i), 16)
-}
-
-// IDFromString attempts to create an ID from a base-16 string.
-func IDFromString(s string) (ID, error) {
- i, err := strconv.ParseUint(s, 16, 64)
- return ID(i), err
-}
-
-// IDSlice implements the sort interface
-type IDSlice []ID
-
-func (p IDSlice) Len() int { return len(p) }
-func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
-func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func (p IDSlice) String() string {
- var b bytes.Buffer
- if p.Len() > 0 {
- b.WriteString(p[0].String())
- }
-
- for i := 1; i < p.Len(); i++ {
- b.WriteString(",")
- b.WriteString(p[i].String())
- }
-
- return b.String()
-}
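
A quick sketch of the types.ID helpers deleted above, reusing the sample values from the tests that follow:

```go
package main

import (
	"fmt"
	"sort"

	"go.etcd.io/etcd/client/pkg/v3/types"
)

func main() {
	// Parse a base-16 member ID back into its uint64 form.
	id, err := types.IDFromString("444129853c343bba")
	if err != nil {
		panic(err)
	}
	fmt.Println(uint64(id))  // 4918257920282737594
	fmt.Println(id.String()) // 444129853c343bba

	// IDSlice sorts numerically and renders as comma-separated base-16.
	ids := types.IDSlice{10, 500, 5}
	sort.Sort(ids)
	fmt.Println(ids.String()) // 5,a,1f4
}
```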
diff --git a/client/pkg/types/id_test.go b/client/pkg/types/id_test.go
deleted file mode 100644
index bec2853432b..00000000000
--- a/client/pkg/types/id_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
- "sort"
- "testing"
-)
-
-func TestIDString(t *testing.T) {
- tests := []struct {
- input ID
- want string
- }{
- {
- input: 12,
- want: "c",
- },
- {
- input: 4918257920282737594,
- want: "444129853c343bba",
- },
- }
-
- for i, tt := range tests {
- got := tt.input.String()
- if tt.want != got {
- t.Errorf("#%d: ID.String failure: want=%v, got=%v", i, tt.want, got)
- }
- }
-}
-
-func TestIDFromString(t *testing.T) {
- tests := []struct {
- input string
- want ID
- }{
- {
- input: "17",
- want: 23,
- },
- {
- input: "612840dae127353",
- want: 437557308098245459,
- },
- }
-
- for i, tt := range tests {
- got, err := IDFromString(tt.input)
- if err != nil {
- t.Errorf("#%d: IDFromString failure: err=%v", i, err)
- continue
- }
- if tt.want != got {
- t.Errorf("#%d: IDFromString failure: want=%v, got=%v", i, tt.want, got)
- }
- }
-}
-
-func TestIDFromStringFail(t *testing.T) {
- tests := []string{
- "",
- "XXX",
- "612840dae127353612840dae127353",
- }
-
- for i, tt := range tests {
- _, err := IDFromString(tt)
- if err == nil {
- t.Fatalf("#%d: IDFromString expected error, but err=nil", i)
- }
- }
-}
-
-func TestIDSlice(t *testing.T) {
- g := []ID{10, 500, 5, 1, 100, 25}
- w := []ID{1, 5, 10, 25, 100, 500}
- sort.Sort(IDSlice(g))
- if !reflect.DeepEqual(g, w) {
- t.Errorf("slice after sort = %#v, want %#v", g, w)
- }
-}
diff --git a/client/pkg/types/set_test.go b/client/pkg/types/set_test.go
deleted file mode 100644
index 73572028931..00000000000
--- a/client/pkg/types/set_test.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
- "sort"
- "testing"
-)
-
-func TestUnsafeSet(t *testing.T) {
- driveSetTests(t, NewUnsafeSet())
-}
-
-func TestThreadsafeSet(t *testing.T) {
- driveSetTests(t, NewThreadsafeSet())
-}
-
-// Check that the contents of two slices are equal; order is irrelevant.
-func equal(a, b []string) bool {
- as := sort.StringSlice(a)
- bs := sort.StringSlice(b)
- as.Sort()
- bs.Sort()
- return reflect.DeepEqual(as, bs)
-}
-
-func driveSetTests(t *testing.T, s Set) {
- // Verify operations on an empty set
- values := s.Values()
- if len(values) != 0 {
- t.Fatalf("Expect values=%v got %v", []string{}, values)
- }
- if l := s.Length(); l != 0 {
- t.Fatalf("Expected length=0, got %d", l)
- }
- for _, v := range []string{"foo", "bar", "baz"} {
- if s.Contains(v) {
- t.Fatalf("Expect s.Contains(%q) to be false, got true", v)
- }
- }
-
- // Add three items, ensure they show up
- s.Add("foo")
- s.Add("bar")
- s.Add("baz")
-
- eValues := []string{"foo", "bar", "baz"}
- values = s.Values()
- if !equal(values, eValues) {
- t.Fatalf("Expect values=%v got %v", eValues, values)
- }
-
- for _, v := range eValues {
- if !s.Contains(v) {
- t.Fatalf("Expect s.Contains(%q) to be true, got false", v)
- }
- }
-
- if l := s.Length(); l != 3 {
- t.Fatalf("Expected length=3, got %d", l)
- }
-
- // Add the same item a second time, ensuring it is not duplicated
- s.Add("foo")
-
- values = s.Values()
- if !equal(values, eValues) {
- t.Fatalf("Expect values=%v got %v", eValues, values)
- }
- if l := s.Length(); l != 3 {
- t.Fatalf("Expected length=3, got %d", l)
- }
-
- // Remove all items, ensure they are gone
- s.Remove("foo")
- s.Remove("bar")
- s.Remove("baz")
-
- eValues = []string{}
- values = s.Values()
- if !equal(values, eValues) {
- t.Fatalf("Expect values=%v got %v", eValues, values)
- }
-
- if l := s.Length(); l != 0 {
- t.Fatalf("Expected length=0, got %d", l)
- }
-
- // Create new copies of the set, and ensure they are independent of the
- // original Set by making modifications
- s.Add("foo")
- s.Add("bar")
- cp1 := s.Copy()
- cp2 := s.Copy()
- s.Remove("foo")
- cp3 := s.Copy()
- cp1.Add("baz")
-
- for i, tt := range []struct {
- want []string
- got []string
- }{
- {[]string{"bar"}, s.Values()},
- {[]string{"foo", "bar", "baz"}, cp1.Values()},
- {[]string{"foo", "bar"}, cp2.Values()},
- {[]string{"bar"}, cp3.Values()},
- } {
- if !equal(tt.want, tt.got) {
- t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got)
- }
- }
-
- for i, tt := range []struct {
- want bool
- got bool
- }{
- {true, s.Equals(cp3)},
- {true, cp3.Equals(s)},
- {false, s.Equals(cp2)},
- {false, s.Equals(cp1)},
- {false, cp1.Equals(s)},
- {false, cp2.Equals(s)},
- {false, cp2.Equals(cp1)},
- } {
- if tt.got != tt.want {
- t.Fatalf("case %d: want %t, got %t", i, tt.want, tt.got)
- }
- }
-
- // Subtract values from a Set, ensuring a new Set is created and
- // the original Sets are unmodified
- sub1 := cp1.Sub(s)
- sub2 := cp2.Sub(cp1)
-
- for i, tt := range []struct {
- want []string
- got []string
- }{
- {[]string{"foo", "bar", "baz"}, cp1.Values()},
- {[]string{"foo", "bar"}, cp2.Values()},
- {[]string{"bar"}, s.Values()},
- {[]string{"foo", "baz"}, sub1.Values()},
- {[]string{}, sub2.Values()},
- } {
- if !equal(tt.want, tt.got) {
- t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got)
- }
- }
-}
-
-func TestUnsafeSetContainsAll(t *testing.T) {
- vals := []string{"foo", "bar", "baz"}
- s := NewUnsafeSet(vals...)
-
- tests := []struct {
- strs []string
- wcontain bool
- }{
- {[]string{}, true},
- {vals[:1], true},
- {vals[:2], true},
- {vals, true},
- {[]string{"cuz"}, false},
- {[]string{vals[0], "cuz"}, false},
- }
- for i, tt := range tests {
- if g := s.ContainsAll(tt.strs); g != tt.wcontain {
- t.Errorf("#%d: ok = %v, want %v", i, g, tt.wcontain)
- }
- }
-}
diff --git a/client/pkg/types/slice_test.go b/client/pkg/types/slice_test.go
deleted file mode 100644
index 8d8a4d0ea70..00000000000
--- a/client/pkg/types/slice_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
- "sort"
- "testing"
-)
-
-func TestUint64Slice(t *testing.T) {
- g := Uint64Slice{10, 500, 5, 1, 100, 25}
- w := Uint64Slice{1, 5, 10, 25, 100, 500}
- sort.Sort(g)
- if !reflect.DeepEqual(g, w) {
- t.Errorf("slice after sort = %#v, want %#v", g, w)
- }
-}
diff --git a/client/pkg/types/urls_test.go b/client/pkg/types/urls_test.go
deleted file mode 100644
index fbb9068cdd5..00000000000
--- a/client/pkg/types/urls_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func TestNewURLs(t *testing.T) {
- tests := []struct {
- strs []string
- wurls URLs
- }{
- {
- []string{"http://127.0.0.1:2379"},
- testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}),
- },
- // it can trim space
- {
- []string{" http://127.0.0.1:2379 "},
- testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}),
- },
- // it does sort
- {
- []string{
- "http://127.0.0.2:2379",
- "http://127.0.0.1:2379",
- },
- testutil.MustNewURLs(t, []string{
- "http://127.0.0.1:2379",
- "http://127.0.0.2:2379",
- }),
- },
- }
- for i, tt := range tests {
- urls, _ := NewURLs(tt.strs)
- if !reflect.DeepEqual(urls, tt.wurls) {
- t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls)
- }
- }
-}
-
-func TestURLsString(t *testing.T) {
- tests := []struct {
- us URLs
- wstr string
- }{
- {
- URLs{},
- "",
- },
- {
- testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}),
- "http://127.0.0.1:2379",
- },
- {
- testutil.MustNewURLs(t, []string{
- "http://127.0.0.1:2379",
- "http://127.0.0.2:2379",
- }),
- "http://127.0.0.1:2379,http://127.0.0.2:2379",
- },
- {
- testutil.MustNewURLs(t, []string{
- "http://127.0.0.2:2379",
- "http://127.0.0.1:2379",
- }),
- "http://127.0.0.2:2379,http://127.0.0.1:2379",
- },
- }
- for i, tt := range tests {
- g := tt.us.String()
- if g != tt.wstr {
- t.Errorf("#%d: string = %s, want %s", i, g, tt.wstr)
- }
- }
-}
-
-func TestURLsSort(t *testing.T) {
- g := testutil.MustNewURLs(t, []string{
- "http://127.0.0.4:2379",
- "http://127.0.0.2:2379",
- "http://127.0.0.1:2379",
- "http://127.0.0.3:2379",
- })
- w := testutil.MustNewURLs(t, []string{
- "http://127.0.0.1:2379",
- "http://127.0.0.2:2379",
- "http://127.0.0.3:2379",
- "http://127.0.0.4:2379",
- })
- gurls := URLs(g)
- gurls.Sort()
- if !reflect.DeepEqual(g, w) {
- t.Errorf("URLs after sort = %#v, want %#v", g, w)
- }
-}
-
-func TestURLsStringSlice(t *testing.T) {
- tests := []struct {
- us URLs
- wstr []string
- }{
- {
- URLs{},
- []string{},
- },
- {
- testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}),
- []string{"http://127.0.0.1:2379"},
- },
- {
- testutil.MustNewURLs(t, []string{
- "http://127.0.0.1:2379",
- "http://127.0.0.2:2379",
- }),
- []string{"http://127.0.0.1:2379", "http://127.0.0.2:2379"},
- },
- {
- testutil.MustNewURLs(t, []string{
- "http://127.0.0.2:2379",
- "http://127.0.0.1:2379",
- }),
- []string{"http://127.0.0.2:2379", "http://127.0.0.1:2379"},
- },
- }
- for i, tt := range tests {
- g := tt.us.StringSlice()
- if !reflect.DeepEqual(g, tt.wstr) {
- t.Errorf("#%d: string slice = %+v, want %+v", i, g, tt.wstr)
- }
- }
-}
-
-func TestNewURLsFail(t *testing.T) {
- tests := [][]string{
- // no urls given
- {},
- // missing protocol scheme
- {"://127.0.0.1:2379"},
- // unsupported scheme
- {"mailto://127.0.0.1:2379"},
- // not conform to host:port
- {"http://127.0.0.1"},
- // contain a path
- {"http://127.0.0.1:2379/path"},
- }
- for i, tt := range tests {
- _, err := NewURLs(tt)
- if err == nil {
- t.Errorf("#%d: err = nil, want error", i)
- }
- }
-}
diff --git a/client/pkg/types/urlsmap_test.go b/client/pkg/types/urlsmap_test.go
deleted file mode 100644
index da184282e79..00000000000
--- a/client/pkg/types/urlsmap_test.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func TestParseInitialCluster(t *testing.T) {
- c, err := NewURLsMap("mem1=http://10.0.0.1:2379,mem1=http://128.193.4.20:2379,mem2=http://10.0.0.2:2379,default=http://127.0.0.1:2379")
- if err != nil {
- t.Fatalf("unexpected parse error: %v", err)
- }
- wc := URLsMap(map[string]URLs{
- "mem1": testutil.MustNewURLs(t, []string{"http://10.0.0.1:2379", "http://128.193.4.20:2379"}),
- "mem2": testutil.MustNewURLs(t, []string{"http://10.0.0.2:2379"}),
- "default": testutil.MustNewURLs(t, []string{"http://127.0.0.1:2379"}),
- })
- if !reflect.DeepEqual(c, wc) {
- t.Errorf("cluster = %+v, want %+v", c, wc)
- }
-}
-
-func TestParseInitialClusterBad(t *testing.T) {
- tests := []string{
- // invalid URL
- "%^",
- // no URL defined for member
- "mem1=,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379",
- "mem1,mem2=http://128.193.4.20:2379,mem3=http://10.0.0.2:2379",
- // bad URL for member
- "default=http://localhost/",
- }
- for i, tt := range tests {
- if _, err := NewURLsMap(tt); err == nil {
- t.Errorf("#%d: unexpected successful parse, want err", i)
- }
- }
-}
-
-func TestNameURLPairsString(t *testing.T) {
- cls := URLsMap(map[string]URLs{
- "abc": testutil.MustNewURLs(t, []string{"http://1.1.1.1:1111", "http://0.0.0.0:0000"}),
- "def": testutil.MustNewURLs(t, []string{"http://2.2.2.2:2222"}),
- "ghi": testutil.MustNewURLs(t, []string{"http://3.3.3.3:1234", "http://127.0.0.1:2380"}),
- // no PeerURLs = not included
- "four": testutil.MustNewURLs(t, []string{}),
- "five": testutil.MustNewURLs(t, nil),
- })
- w := "abc=http://0.0.0.0:0000,abc=http://1.1.1.1:1111,def=http://2.2.2.2:2222,ghi=http://127.0.0.1:2380,ghi=http://3.3.3.3:1234"
- if g := cls.String(); g != w {
- t.Fatalf("NameURLPairs.String():\ngot %#v\nwant %#v", g, w)
- }
-}
-
-func TestParse(t *testing.T) {
- tests := []struct {
- s string
- wm map[string][]string
- }{
- {
- "",
- map[string][]string{},
- },
- {
- "a=b",
- map[string][]string{"a": {"b"}},
- },
- {
- "a=b,a=c",
- map[string][]string{"a": {"b", "c"}},
- },
- {
- "a=b,a1=c",
- map[string][]string{"a": {"b"}, "a1": {"c"}},
- },
- }
- for i, tt := range tests {
- m := parse(tt.s)
- if !reflect.DeepEqual(m, tt.wm) {
- t.Errorf("#%d: m = %+v, want %+v", i, m, tt.wm)
- }
- }
-}
-
-// TestNewURLsMapIPV6 only runs on Go 1.5+ because Go 1.4 doesn't support a literal IPv6 address with a zone in a
-// URI (https://github.com/golang/go/issues/6530).
-func TestNewURLsMapIPV6(t *testing.T) {
- c, err := NewURLsMap("mem1=http://[2001:db8::1]:2380,mem1=http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380,mem2=http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380")
- if err != nil {
- t.Fatalf("unexpected parse error: %v", err)
- }
- wc := URLsMap(map[string]URLs{
- "mem1": testutil.MustNewURLs(t, []string{"http://[2001:db8::1]:2380", "http://[fe80::6e40:8ff:feb1:58e4%25en0]:2380"}),
- "mem2": testutil.MustNewURLs(t, []string{"http://[fe80::92e2:baff:fe7c:3224%25ext0]:2380"}),
- })
- if !reflect.DeepEqual(c, wc) {
- t.Errorf("cluster = %#v, want %#v", c, wc)
- }
-}
-
-func TestNewURLsMapFromStringMapEmpty(t *testing.T) {
- mss := make(map[string]string)
- urlsMap, err := NewURLsMapFromStringMap(mss, ",")
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- s := ""
- um, err := NewURLsMap(s)
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
-
- if um.String() != urlsMap.String() {
- t.Errorf("Expected:\n%+v\ngot:\n%+v", um, urlsMap)
- }
-}
-
-func TestNewURLsMapFromStringMapNormal(t *testing.T) {
- mss := make(map[string]string)
- mss["host0"] = "http://127.0.0.1:2379,http://127.0.0.1:2380"
- mss["host1"] = "http://127.0.0.1:2381,http://127.0.0.1:2382"
- mss["host2"] = "http://127.0.0.1:2383,http://127.0.0.1:2384"
- mss["host3"] = "http://127.0.0.1:2385,http://127.0.0.1:2386"
- urlsMap, err := NewURLsMapFromStringMap(mss, ",")
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- s := "host0=http://127.0.0.1:2379,host0=http://127.0.0.1:2380," +
- "host1=http://127.0.0.1:2381,host1=http://127.0.0.1:2382," +
- "host2=http://127.0.0.1:2383,host2=http://127.0.0.1:2384," +
- "host3=http://127.0.0.1:2385,host3=http://127.0.0.1:2386"
- um, err := NewURLsMap(s)
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
-
- if um.String() != urlsMap.String() {
- t.Errorf("Expected:\n%+v\ngot:\n%+v", um, urlsMap)
- }
-}
diff --git a/client/pkg/verify/verify.go b/client/pkg/verify/verify.go
deleted file mode 100644
index 0cc1b48277f..00000000000
--- a/client/pkg/verify/verify.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package verify
-
-import (
- "fmt"
- "os"
- "strings"
-)
-
-const ENV_VERIFY = "ETCD_VERIFY"
-
-type VerificationType string
-
-const (
- ENV_VERIFY_VALUE_ALL VerificationType = "all"
- ENV_VERIFY_VALUE_ASSERT VerificationType = "assert"
-)
-
-func getEnvVerify() string {
- return strings.ToLower(os.Getenv(ENV_VERIFY))
-}
-
-func IsVerificationEnabled(verification VerificationType) bool {
- env := getEnvVerify()
- return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification))
-}
-
-// EnableVerifications sets `ENV_VERIFY` and returns a function that
-// can be used to restore the original setting.
-func EnableVerifications(verification VerificationType) func() {
- previousEnv := getEnvVerify()
- os.Setenv(ENV_VERIFY, string(verification))
- return func() {
- os.Setenv(ENV_VERIFY, previousEnv)
- }
-}
-
-// EnableAllVerifications enables verification and returns a function
-// that can be used to restore the original setting.
-func EnableAllVerifications() func() {
- return EnableVerifications(ENV_VERIFY_VALUE_ALL)
-}
-
-// DisableVerifications unsets `ENV_VERIFY` and returns a function that
-// can be used to restore the original settings.
-func DisableVerifications() func() {
- previousEnv := getEnvVerify()
- os.Unsetenv(ENV_VERIFY)
- return func() {
- os.Setenv(ENV_VERIFY, previousEnv)
- }
-}
-
-// Verify performs verification if assertions are enabled.
-// In the default setup this means it runs in tests and is skipped in production code.
-func Verify(f func()) {
- if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) {
- f()
- }
-}
-
-// Assert will panic with a given formatted message if the given condition is false.
-func Assert(condition bool, msg string, v ...interface{}) {
- if !condition {
- panic(fmt.Sprintf("assertion failed: "+msg, v...))
- }
-}
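For reference, a minimal sketch of how the verify package removed above could be driven from a test, assuming the `go.etcd.io/etcd/client/pkg/v3/verify` import path; the test name and the checked invariant are made up for illustration:

```go
package verify_test

import (
	"testing"

	"go.etcd.io/etcd/client/pkg/v3/verify"
)

func TestInvariantHolds(t *testing.T) {
	// Enable all verifications for this test and restore the previous
	// ETCD_VERIFY value when the test finishes.
	revert := verify.EnableAllVerifications()
	defer revert()

	applied, committed := 3, 5 // hypothetical indexes, purely for illustration

	// The closure only runs when verification is enabled
	// (ETCD_VERIFY=all or ETCD_VERIFY=assert).
	verify.Verify(func() {
		verify.Assert(applied <= committed,
			"applied index (%d) must not exceed committed index (%d)",
			applied, committed)
	})
}
```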
diff --git a/client/v2/LICENSE b/client/v2/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/client/v2/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/client/v2/README.md b/client/v2/README.md
deleted file mode 100644
index 9ec7d86ecaa..00000000000
--- a/client/v2/README.md
+++ /dev/null
@@ -1,112 +0,0 @@
-# etcd/client
-
-etcd/client is the Go client library for etcd.
-
-[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client)
-
-For full compatibility, it is recommended to install released versions of clients using go modules.
-
-## Install
-
-```bash
-go get go.etcd.io/etcd/v3/client
-```
-
-## Usage
-
-```go
-package main
-
-import (
- "context"
- "log"
- "time"
-
- "go.etcd.io/etcd/v3/client"
-)
-
-func main() {
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: client.DefaultTransport,
- // set timeout per request to fail fast when the target endpoint is unavailable
- HeaderTimeoutPerRequest: time.Second,
- }
- c, err := client.New(cfg)
- if err != nil {
- log.Fatal(err)
- }
- kapi := client.NewKeysAPI(c)
- // set "/foo" key with "bar" value
- log.Print("Setting '/foo' key with 'bar' value")
- resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Set is done. Metadata is %q\n", resp)
- }
- // get "/foo" key's value
- log.Print("Getting '/foo' key value")
- resp, err = kapi.Get(context.Background(), "/foo", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Get is done. Metadata is %q\n", resp)
- // print value
- log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
- }
-}
-```
-
-## Error Handling
-
-etcd client might return three types of errors.
-
-- context error
-
-Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
-
-- cluster error
-
-Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
-
-- response error
-
-If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
-
-Here is the example code to handle client errors:
-
-```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
-c, err := client.New(cfg)
-if err != nil {
- log.Fatal(err)
-}
-
-kapi := client.NewKeysAPI(c)
-resp, err := kapi.Set(ctx, "test", "bar", nil)
-if err != nil {
- if err == context.Canceled {
- // ctx is canceled by another routine
- } else if err == context.DeadlineExceeded {
- // ctx is attached with a deadline and it exceeded
- } else if cerr, ok := err.(*client.ClusterError); ok {
- // process (cerr.Errors)
- } else {
- // bad cluster endpoints, which are not etcd servers
- }
-}
-```
-
-
-## Caveat
-
-1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency for both the client and the server side. This preference doesn't affect the consistency of the data the client consumes, because data replicated to each etcd member has already passed through the consensus process.
-
-2. etcd/client does round-robin rotation over the other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors encountered.
-
-3. By default, etcd/client cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped but the connection is kept alive hasn't been brought to our attention.
-
-4. etcd/client cannot detect whether a member is healthy using watches or non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests (as sketched below) or monitor the /health endpoint for member health information.
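A short sketch of the quorum-read workaround mentioned in caveat 4, assuming the client's `GetOptions.Quorum` flag and reusing the `c` client from the examples above; error handling is kept minimal:

```go
kapi := client.NewKeysAPI(c)

// Ask for a quorum read so the answer cannot come from an isolated
// member serving stale data.
resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
if err != nil {
	log.Fatal(err)
}
log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
```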
diff --git a/client/v2/auth_role.go b/client/v2/auth_role.go
deleted file mode 100644
index b6ba7e150dc..00000000000
--- a/client/v2/auth_role.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
-)
-
-type Role struct {
- Role string `json:"role"`
- Permissions Permissions `json:"permissions"`
- Grant *Permissions `json:"grant,omitempty"`
- Revoke *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
- KV rwPermission `json:"kv"`
-}
-
-type rwPermission struct {
- Read []string `json:"read"`
- Write []string `json:"write"`
-}
-
-type PermissionType int
-
-const (
- ReadPermission PermissionType = iota
- WritePermission
- ReadWritePermission
-)
-
-// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
-// interact with etcd's role creation and modification features.
-func NewAuthRoleAPI(c Client) AuthRoleAPI {
- return &httpAuthRoleAPI{
- client: c,
- }
-}
-
-type AuthRoleAPI interface {
- // AddRole adds a role.
- AddRole(ctx context.Context, role string) error
-
- // RemoveRole removes a role.
- RemoveRole(ctx context.Context, role string) error
-
- // GetRole retrieves role details.
- GetRole(ctx context.Context, role string) (*Role, error)
-
- // GrantRoleKV grants a role some permission prefixes for the KV store.
- GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // RevokeRoleKV revokes some permission prefixes for a role on the KV store.
- RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // ListRoles lists roles.
- ListRoles(ctx context.Context) ([]string, error)
-}
-
-type httpAuthRoleAPI struct {
- client httpClient
-}
-
-type authRoleAPIAction struct {
- verb string
- name string
- role *Role
-}
-
-type authRoleAPIList struct{}
-
-func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", l.name)
- if l.role == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.role)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
- resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
- var roleList struct {
- Roles []Role `json:"roles"`
- }
- if err = json.Unmarshal(body, &roleList); err != nil {
- return nil, err
- }
- ret := make([]string, 0, len(roleList.Roles))
- for _, r := range roleList.Roles {
- ret = append(ret, r.Role)
- }
- return ret, nil
-}
-
-func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
- role := &Role{
- Role: rolename,
- }
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "DELETE",
- name: rolename,
- })
-}
-
-func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err := json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "GET",
- name: rolename,
- })
-}
-
-func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
- var out rwPermission
- switch permType {
- case ReadPermission:
- out.Read = prefixes
- case WritePermission:
- out.Write = prefixes
- case ReadWritePermission:
- out.Read = prefixes
- out.Write = prefixes
- }
- return out
-}
-
-func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Grant: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Revoke: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var role Role
- if err = json.Unmarshal(body, &role); err != nil {
- return nil, err
- }
- return &role, nil
-}
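As a quick illustration of the AuthRoleAPI removed above, a hedged sketch that creates a role and grants it read/write access to a key prefix; the role name and prefix are placeholders, and `c` is an existing `client.Client`:

```go
roles := client.NewAuthRoleAPI(c)

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// Create the role, then grant it read/write permissions on a prefix.
if err := roles.AddRole(ctx, "app-role"); err != nil {
	log.Fatal(err)
}
if _, err := roles.GrantRoleKV(ctx, "app-role", []string{"/app/*"}, client.ReadWritePermission); err != nil {
	log.Fatal(err)
}
```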
diff --git a/client/v2/auth_user.go b/client/v2/auth_user.go
deleted file mode 100644
index 8e7e2efe833..00000000000
--- a/client/v2/auth_user.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
- "path"
-)
-
-var (
- defaultV2AuthPrefix = "/v2/auth"
-)
-
-type User struct {
- User string `json:"user"`
- Password string `json:"password,omitempty"`
- Roles []string `json:"roles"`
- Grant []string `json:"grant,omitempty"`
- Revoke []string `json:"revoke,omitempty"`
-}
-
-// userListEntry is the user representation given by the server for ListUsers
-type userListEntry struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-type UserRoles struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-func v2AuthURL(ep url.URL, action string, name string) *url.URL {
- if name != "" {
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
- return &ep
- }
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
- return &ep
-}
-
-// NewAuthAPI constructs a new AuthAPI that uses HTTP to
-// interact with etcd's general auth features.
-func NewAuthAPI(c Client) AuthAPI {
- return &httpAuthAPI{
- client: c,
- }
-}
-
-type AuthAPI interface {
- // Enable auth.
- Enable(ctx context.Context) error
-
- // Disable auth.
- Disable(ctx context.Context) error
-}
-
-type httpAuthAPI struct {
- client httpClient
-}
-
-func (s *httpAuthAPI) Enable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"PUT"})
-}
-
-func (s *httpAuthAPI) Disable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"DELETE"})
-}
-
-func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
- resp, body, err := s.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-type authAPIAction struct {
- verb string
-}
-
-func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "enable", "")
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
-}
-
-type authError struct {
- Message string `json:"message"`
- Code int `json:"-"`
-}
-
-func (e authError) Error() string {
- return e.Message
-}
-
-// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
-// interact with etcd's user creation and modification features.
-func NewAuthUserAPI(c Client) AuthUserAPI {
- return &httpAuthUserAPI{
- client: c,
- }
-}
-
-type AuthUserAPI interface {
- // AddUser adds a user.
- AddUser(ctx context.Context, username string, password string) error
-
- // RemoveUser removes a user.
- RemoveUser(ctx context.Context, username string) error
-
- // GetUser retrieves user details.
- GetUser(ctx context.Context, username string) (*User, error)
-
- // GrantUser grants a user some permission roles.
- GrantUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // RevokeUser revokes some permission roles from a user.
- RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // ChangePassword changes the user's password.
- ChangePassword(ctx context.Context, username string, password string) (*User, error)
-
- // ListUsers lists the users.
- ListUsers(ctx context.Context) ([]string, error)
-}
-
-type httpAuthUserAPI struct {
- client httpClient
-}
-
-type authUserAPIAction struct {
- verb string
- username string
- user *User
-}
-
-type authUserAPIList struct{}
-
-func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", l.username)
- if l.user == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.user)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
- resp, body, err := u.client.Do(ctx, &authUserAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
-
- var userList struct {
- Users []userListEntry `json:"users"`
- }
-
- if err = json.Unmarshal(body, &userList); err != nil {
- return nil, err
- }
-
- ret := make([]string, 0, len(userList.Users))
- for _, u := range userList.Users {
- ret = append(ret, u.User)
- }
- return ret, nil
-}
-
-func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
- user := &User{
- User: username,
- Password: password,
- }
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "DELETE",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
- return u.modUser(ctx, &authUserAPIAction{
- verb: "GET",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Grant: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Revoke: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
- user := &User{
- User: username,
- Password: password,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var user User
- if err = json.Unmarshal(body, &user); err != nil {
- var userR UserRoles
- if urerr := json.Unmarshal(body, &userR); urerr != nil {
- return nil, err
- }
- user.User = userR.User
- for _, r := range userR.Roles {
- user.Roles = append(user.Roles, r.Role)
- }
- }
- return &user, nil
-}
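Similarly, a sketch of the user-management flow built on the AuthUserAPI and AuthAPI removed above; the user name, password, and role are placeholders, and `c` is an existing `client.Client`:

```go
users := client.NewAuthUserAPI(c)
auth := client.NewAuthAPI(c)

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// Create a user, attach an existing role, then switch auth on.
if err := users.AddUser(ctx, "app-user", "app-password"); err != nil {
	log.Fatal(err)
}
if _, err := users.GrantUser(ctx, "app-user", []string{"app-role"}); err != nil {
	log.Fatal(err)
}
if err := auth.Enable(ctx); err != nil {
	log.Fatal(err)
}
```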
diff --git a/client/v2/client.go b/client/v2/client.go
deleted file mode 100644
index a93c528fb36..00000000000
--- a/client/v2/client.go
+++ /dev/null
@@ -1,719 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math/rand"
- "net"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "sync"
- "time"
-
- "go.etcd.io/etcd/api/v3/version"
-)
-
-var (
- ErrNoEndpoints = errors.New("client: no endpoints available")
- ErrTooManyRedirects = errors.New("client: too many redirects")
- ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
- ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
- errTooManyRedirectChecks = errors.New("client: too many redirect checks")
-
- // oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
- // that Do() will not retry a request.
- oneShotCtxValue interface{}
-)
-
-var DefaultRequestTimeout = 5 * time.Second
-
-var DefaultTransport CancelableTransport = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).DialContext,
- TLSHandshakeTimeout: 10 * time.Second,
-}
-
-type EndpointSelectionMode int
-
-const (
- // EndpointSelectionRandom is the default value of the 'SelectionMode'.
- // As the name implies, the client object will pick a node from the members
- // of the cluster in a random fashion. If the cluster has three members, A, B,
- // and C, the client picks any node from its three members as its request
- // destination.
- EndpointSelectionRandom EndpointSelectionMode = iota
-
- // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
- // requests are sent directly to the cluster leader. This reduces
- // forwarding roundtrips compared to making requests to etcd followers
- // who then forward them to the cluster leader. In the event of a leader
- // failure, however, clients configured this way cannot prioritize among
- // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
- // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
- // maintain its knowledge of current cluster state.
- //
- // This mode should be used with Client.AutoSync().
- EndpointSelectionPrioritizeLeader
-)
-
-type Config struct {
- // Endpoints defines a set of URLs (schemes, hosts and ports only)
- // that can be used to communicate with a logical etcd cluster. For
- // example, a three-node cluster could be provided like so:
- //
- // Endpoints: []string{
- // "http://node1.example.com:2379",
- // "http://node2.example.com:2379",
- // "http://node3.example.com:2379",
- // }
- //
- // If multiple endpoints are provided, the Client will attempt to
- // use them all in the event that one or more of them are unusable.
- //
- // If Client.Sync is ever called, the Client may cache an alternate
- // set of endpoints to continue operation.
- Endpoints []string
-
- // Transport is used by the Client to drive HTTP requests. If not
- // provided, DefaultTransport will be used.
- Transport CancelableTransport
-
- // CheckRedirect specifies the policy for handling HTTP redirects.
- // If CheckRedirect is not nil, the Client calls it before
- // following an HTTP redirect. The sole argument is the number of
- // requests that have already been made. If CheckRedirect returns
- // an error, Client.Do will not make any further requests and return
- // the error back to the caller.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive requests.
- CheckRedirect CheckRedirectFunc
-
- // Username specifies the user credential to add as an authorization header
- Username string
-
- // Password is the password for the specified user to add as an authorization header
- // to the request.
- Password string
-
- // HeaderTimeoutPerRequest specifies the time limit to wait for response
- // header in a single request made by the Client. The timeout includes
- // connection time, any redirects, and header wait time.
- //
- // For a non-watch GET request, the server returns the response body immediately.
- // For PUT/POST/DELETE requests, the server will attempt to commit the request
- // before responding, which is expected to take `100ms + 2 * RTT`.
- // For a watch request, the server returns the header immediately to notify the
- // Client that the watch has started. But if the server is behind some kind of
- // proxy, the response header may be cached at the proxy, and the Client cannot
- // rely on this behavior.
- //
- // In particular, a wait request will ignore this timeout.
- //
- // One API call may send multiple requests to different etcd servers until it
- // succeeds. Use context of the API to specify the overall timeout.
- //
- // A HeaderTimeoutPerRequest of zero means no timeout.
- HeaderTimeoutPerRequest time.Duration
-
- // SelectionMode is an EndpointSelectionMode enum that specifies the
- // policy for choosing the etcd cluster node to which requests are sent.
- SelectionMode EndpointSelectionMode
-}
-
-func (cfg *Config) transport() CancelableTransport {
- if cfg.Transport == nil {
- return DefaultTransport
- }
- return cfg.Transport
-}
-
-func (cfg *Config) checkRedirect() CheckRedirectFunc {
- if cfg.CheckRedirect == nil {
- return DefaultCheckRedirect
- }
- return cfg.CheckRedirect
-}
-
-// CancelableTransport mimics net/http.Transport, but requires that
-// the object also support request cancellation.
-type CancelableTransport interface {
- http.RoundTripper
- CancelRequest(req *http.Request)
-}
-
-type CheckRedirectFunc func(via int) error
-
-// DefaultCheckRedirect follows up to 10 redirects, but no more.
-var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
- if via > 10 {
- return ErrTooManyRedirects
- }
- return nil
-}
-
-type Client interface {
- // Sync updates the internal cache of the etcd cluster's membership.
- Sync(context.Context) error
-
- // AutoSync periodically calls Sync() at the given interval.
- // The recommended sync interval is 10 seconds to 1 minute: frequent enough
- // for the client to catch up with cluster changes in time, without adding
- // too much overhead to the server.
- //
- // The example to use it:
- //
- // for {
- // err := client.AutoSync(ctx, 10*time.Second)
- // if err == context.DeadlineExceeded || err == context.Canceled {
- // break
- // }
- // log.Print(err)
- // }
- AutoSync(context.Context, time.Duration) error
-
- // Endpoints returns a copy of the current set of API endpoints used
- // by Client to resolve HTTP requests. If Sync has ever been called,
- // this may differ from the initial Endpoints provided in the Config.
- Endpoints() []string
-
- // SetEndpoints sets the set of API endpoints used by Client to resolve
- // HTTP requests. If the given endpoints are not valid, an error will be
- // returned.
- SetEndpoints(eps []string) error
-
- // GetVersion retrieves the current etcd server and cluster version
- GetVersion(ctx context.Context) (*version.Versions, error)
-
- httpClient
-}
-
-func New(cfg Config) (Client, error) {
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
- rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- selectionMode: cfg.SelectionMode,
- }
- if cfg.Username != "" {
- c.credentials = &credentials{
- username: cfg.Username,
- password: cfg.Password,
- }
- }
- if err := c.SetEndpoints(cfg.Endpoints); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type httpClient interface {
- Do(context.Context, httpAction) (*http.Response, []byte, error)
-}
-
-func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
- return func(ep url.URL) httpClient {
- return &redirectFollowingHTTPClient{
- checkRedirect: cr,
- client: &simpleHTTPClient{
- transport: tr,
- endpoint: ep,
- headerTimeout: headerTimeout,
- },
- }
- }
-}
-
-type credentials struct {
- username string
- password string
-}
-
-type httpClientFactory func(url.URL) httpClient
-
-type httpAction interface {
- HTTPRequest(url.URL) *http.Request
-}
-
-type httpClusterClient struct {
- clientFactory httpClientFactory
- endpoints []url.URL
- pinned int
- credentials *credentials
- sync.RWMutex
- rand *rand.Rand
- selectionMode EndpointSelectionMode
-}
-
-func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
- ceps := make([]url.URL, len(eps))
- copy(ceps, eps)
-
- // To perform a lookup on the new endpoint list without using the current
- // client, we'll copy it
- clientCopy := &httpClusterClient{
- clientFactory: c.clientFactory,
- credentials: c.credentials,
- rand: c.rand,
-
- pinned: 0,
- endpoints: ceps,
- }
-
- mAPI := NewMembersAPI(clientCopy)
- leader, err := mAPI.Leader(ctx)
- if err != nil {
- return "", err
- }
- if len(leader.ClientURLs) == 0 {
- return "", ErrNoLeaderEndpoint
- }
-
- return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
-}
-
-func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
- if len(eps) == 0 {
- return []url.URL{}, ErrNoEndpoints
- }
-
- neps := make([]url.URL, len(eps))
- for i, ep := range eps {
- u, err := url.Parse(ep)
- if err != nil {
- return []url.URL{}, err
- }
- neps[i] = *u
- }
- return neps, nil
-}
-
-func (c *httpClusterClient) SetEndpoints(eps []string) error {
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- c.Lock()
- defer c.Unlock()
-
- c.endpoints = shuffleEndpoints(c.rand, neps)
- // We're not doing anything for PrioritizeLeader here. This is
- // because we don't have a context, so we can't call getLeaderEndpoint.
- // However, if you're using PrioritizeLeader, you've already been told
- // to regularly call Sync, where we do have a ctx and can figure out the
- // leader. PrioritizeLeader is also quite a loose guarantee, so deal
- // with it.
- c.pinned = 0
-
- return nil
-}
-
-func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- action := act
- c.RLock()
- leps := len(c.endpoints)
- eps := make([]url.URL, leps)
- n := copy(eps, c.endpoints)
- pinned := c.pinned
-
- if c.credentials != nil {
- action = &authedAction{
- act: act,
- credentials: *c.credentials,
- }
- }
- c.RUnlock()
-
- if leps == 0 {
- return nil, nil, ErrNoEndpoints
- }
-
- if leps != n {
- return nil, nil, errors.New("unable to pick endpoint: copy failed")
- }
-
- var resp *http.Response
- var body []byte
- var err error
- cerr := &ClusterError{}
- isOneShot := ctx.Value(&oneShotCtxValue) != nil
-
- for i := pinned; i < leps+pinned; i++ {
- k := i % leps
- hc := c.clientFactory(eps[k])
- resp, body, err = hc.Do(ctx, action)
- if err != nil {
- cerr.Errors = append(cerr.Errors, err)
- if err == ctx.Err() {
- return nil, nil, ctx.Err()
- }
- if err == context.Canceled || err == context.DeadlineExceeded {
- return nil, nil, err
- }
- } else if resp.StatusCode/100 == 5 {
- switch resp.StatusCode {
- case http.StatusInternalServerError, http.StatusServiceUnavailable:
- // TODO: make sure this is a no leader response
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
- default:
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
- }
- err = cerr.Errors[0]
- }
- if err != nil {
- if !isOneShot {
- continue
- }
- c.Lock()
- c.pinned = (k + 1) % leps
- c.Unlock()
- return nil, nil, err
- }
- if k != pinned {
- c.Lock()
- c.pinned = k
- c.Unlock()
- }
- return resp, body, nil
- }
-
- return nil, nil, cerr
-}
-
-func (c *httpClusterClient) Endpoints() []string {
- c.RLock()
- defer c.RUnlock()
-
- eps := make([]string, len(c.endpoints))
- for i, ep := range c.endpoints {
- eps[i] = ep.String()
- }
-
- return eps
-}
-
-func (c *httpClusterClient) Sync(ctx context.Context) error {
- mAPI := NewMembersAPI(c)
- ms, err := mAPI.List(ctx)
- if err != nil {
- return err
- }
-
- var eps []string
- for _, m := range ms {
- eps = append(eps, m.ClientURLs...)
- }
-
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- npin := 0
-
- switch c.selectionMode {
- case EndpointSelectionRandom:
- c.RLock()
- eq := endpointsEqual(c.endpoints, neps)
- c.RUnlock()
-
- if eq {
- return nil
- }
- // When items in the endpoint list changes, we choose a new pin
- neps = shuffleEndpoints(c.rand, neps)
- case EndpointSelectionPrioritizeLeader:
- nle, err := c.getLeaderEndpoint(ctx, neps)
- if err != nil {
- return ErrNoLeaderEndpoint
- }
-
- for i, n := range neps {
- if n.String() == nle {
- npin = i
- break
- }
- }
- default:
- return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
- }
-
- c.Lock()
- defer c.Unlock()
- c.endpoints = neps
- c.pinned = npin
-
- return nil
-}
-
-func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
- for {
- err := c.Sync(ctx)
- if err != nil {
- return err
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-ticker.C:
- }
- }
-}
-
-func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
- act := &getAction{Prefix: "/version"}
-
- resp, body, err := c.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- switch resp.StatusCode {
- case http.StatusOK:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- var vresp version.Versions
- if err := json.Unmarshal(body, &vresp); err != nil {
- return nil, ErrInvalidJSON
- }
- return &vresp, nil
- default:
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return nil, ErrInvalidJSON
- }
- return nil, etcdErr
- }
-}
-
-type roundTripResponse struct {
- resp *http.Response
- err error
-}
-
-type simpleHTTPClient struct {
- transport CancelableTransport
- endpoint url.URL
- headerTimeout time.Duration
-}
-
-// ErrNoRequest indicates that the HTTPRequest object could not be found
-// or was nil. No processing could continue.
-var ErrNoRequest = errors.New("no HTTPRequest was available")
-
-func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- req := act.HTTPRequest(c.endpoint)
- if req == nil {
- return nil, nil, ErrNoRequest
- }
-
- if err := printcURL(req); err != nil {
- return nil, nil, err
- }
-
- isWait := false
- if req.URL != nil {
- ws := req.URL.Query().Get("wait")
- if len(ws) != 0 {
- var err error
- isWait, err = strconv.ParseBool(ws)
- if err != nil {
- return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
- }
- }
- }
-
- var hctx context.Context
- var hcancel context.CancelFunc
- if !isWait && c.headerTimeout > 0 {
- hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
- } else {
- hctx, hcancel = context.WithCancel(ctx)
- }
- defer hcancel()
-
- reqcancel := requestCanceler(c.transport, req)
-
- rtchan := make(chan roundTripResponse, 1)
- go func() {
- resp, err := c.transport.RoundTrip(req)
- rtchan <- roundTripResponse{resp: resp, err: err}
- close(rtchan)
- }()
-
- var resp *http.Response
- var err error
-
- select {
- case rtresp := <-rtchan:
- resp, err = rtresp.resp, rtresp.err
- case <-hctx.Done():
- // cancel and wait for request to actually exit before continuing
- reqcancel()
- rtresp := <-rtchan
- resp = rtresp.resp
- switch {
- case ctx.Err() != nil:
- err = ctx.Err()
- case hctx.Err() != nil:
- err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
- default:
- panic("failed to get error from context")
- }
- }
-
- // always check for resp nil-ness to deal with possible
- // race conditions between channels above
- defer func() {
- if resp != nil {
- resp.Body.Close()
- }
- }()
-
- if err != nil {
- return nil, nil, err
- }
-
- var body []byte
- done := make(chan struct{})
- go func() {
- body, err = io.ReadAll(resp.Body)
- done <- struct{}{}
- }()
-
- select {
- case <-ctx.Done():
- if resp != nil {
- resp.Body.Close()
- }
- <-done
- return nil, nil, ctx.Err()
- case <-done:
- }
-
- return resp, body, err
-}
-
-type authedAction struct {
- act httpAction
- credentials credentials
-}
-
-func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
- r := a.act.HTTPRequest(url)
- r.SetBasicAuth(a.credentials.username, a.credentials.password)
- return r
-}
-
-type redirectFollowingHTTPClient struct {
- client httpClient
- checkRedirect CheckRedirectFunc
-}
-
-func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- next := act
- for i := 0; i < 100; i++ {
- if i > 0 {
- if err := r.checkRedirect(i); err != nil {
- return nil, nil, err
- }
- }
- resp, body, err := r.client.Do(ctx, next)
- if err != nil {
- return nil, nil, err
- }
- if resp.StatusCode/100 == 3 {
- hdr := resp.Header.Get("Location")
- if hdr == "" {
- return nil, nil, errors.New("location header not set")
- }
- loc, err := url.Parse(hdr)
- if err != nil {
- return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr)
- }
- next = &redirectedHTTPAction{
- action: act,
- location: *loc,
- }
- continue
- }
- return resp, body, nil
- }
-
- return nil, nil, errTooManyRedirectChecks
-}
-
-type redirectedHTTPAction struct {
- action httpAction
- location url.URL
-}
-
-func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
- orig := r.action.HTTPRequest(ep)
- orig.URL = &r.location
- return orig
-}
-
-func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
- // copied from rand.Rand.Perm in Go <= 1.9
- n := len(eps)
- p := make([]int, n)
- for i := 0; i < n; i++ {
- j := r.Intn(i + 1)
- p[i] = p[j]
- p[j] = i
- }
- neps := make([]url.URL, n)
- for i, k := range p {
- neps[i] = eps[k]
- }
- return neps
-}
-
-func endpointsEqual(left, right []url.URL) bool {
- if len(left) != len(right) {
- return false
- }
-
- sLeft := make([]string, len(left))
- sRight := make([]string, len(right))
- for i, l := range left {
- sLeft[i] = l.String()
- }
- for i, r := range right {
- sRight[i] = r.String()
- }
-
- sort.Strings(sLeft)
- sort.Strings(sRight)
- for i := range sLeft {
- if sLeft[i] != sRight[i] {
- return false
- }
- }
- return true
-}
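To tie the pieces of client.go together, a sketch of constructing a client that prioritizes the leader and keeps its endpoint list in sync, following the AutoSync pattern from the interface comment above; the endpoints are placeholders:

```go
cfg := client.Config{
	Endpoints:               []string{"http://node1.example.com:2379", "http://node2.example.com:2379"},
	Transport:               client.DefaultTransport,
	HeaderTimeoutPerRequest: time.Second,
	SelectionMode:           client.EndpointSelectionPrioritizeLeader,
}
c, err := client.New(cfg)
if err != nil {
	log.Fatal(err)
}

// PrioritizeLeader only works if the client keeps learning who the leader
// is, so run AutoSync in the background as the documentation recommends.
go func() {
	for {
		err := c.AutoSync(context.Background(), 10*time.Second)
		if err == context.DeadlineExceeded || err == context.Canceled {
			break
		}
		log.Print(err)
	}
}()
```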
diff --git a/client/v2/client_test.go b/client/v2/client_test.go
deleted file mode 100644
index abfcff93d1b..00000000000
--- a/client/v2/client_test.go
+++ /dev/null
@@ -1,1096 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "errors"
- "io"
- "math/rand"
- "net/http"
- "net/url"
- "reflect"
- "sort"
- "strings"
- "testing"
- "time"
-
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-type actionAssertingHTTPClient struct {
- t *testing.T
- num int
- act httpAction
-
- resp http.Response
- body []byte
- err error
-}
-
-func (a *actionAssertingHTTPClient) Do(_ context.Context, act httpAction) (*http.Response, []byte, error) {
- if !reflect.DeepEqual(a.act, act) {
- a.t.Errorf("#%d: unexpected httpAction: want=%#v got=%#v", a.num, a.act, act)
- }
-
- return &a.resp, a.body, a.err
-}
-
-type staticHTTPClient struct {
- resp http.Response
- body []byte
- err error
-}
-
-func (s *staticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) {
- return &s.resp, s.body, s.err
-}
-
-type staticHTTPAction struct {
- request http.Request
-}
-
-func (s *staticHTTPAction) HTTPRequest(url.URL) *http.Request {
- return &s.request
-}
-
-type staticHTTPResponse struct {
- resp http.Response
- body []byte
- err error
-}
-
-type multiStaticHTTPClient struct {
- responses []staticHTTPResponse
- cur int
-}
-
-func (s *multiStaticHTTPClient) Do(context.Context, httpAction) (*http.Response, []byte, error) {
- r := s.responses[s.cur]
- s.cur++
- return &r.resp, r.body, r.err
-}
-
-func newStaticHTTPClientFactory(responses []staticHTTPResponse) httpClientFactory {
- var cur int
- return func(url.URL) httpClient {
- r := responses[cur]
- cur++
- return &staticHTTPClient{resp: r.resp, body: r.body, err: r.err}
- }
-}
-
-type fakeTransport struct {
- respchan chan *http.Response
- errchan chan error
- startCancel chan struct{}
- finishCancel chan struct{}
-}
-
-func newFakeTransport() *fakeTransport {
- return &fakeTransport{
- respchan: make(chan *http.Response, 1),
- errchan: make(chan error, 1),
- startCancel: make(chan struct{}, 1),
- finishCancel: make(chan struct{}, 1),
- }
-}
-
-func (t *fakeTransport) CancelRequest(*http.Request) {
- t.startCancel <- struct{}{}
-}
-
-type fakeAction struct{}
-
-func (a *fakeAction) HTTPRequest(url.URL) *http.Request {
- return &http.Request{}
-}
-
-func TestSimpleHTTPClientDoSuccess(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.respchan <- &http.Response{
- StatusCode: http.StatusTeapot,
- Body: io.NopCloser(strings.NewReader("foo")),
- }
-
- resp, body, err := c.Do(context.Background(), &fakeAction{})
- if err != nil {
- t.Fatalf("incorrect error value: want=nil got=%v", err)
- }
-
- wantCode := http.StatusTeapot
- if wantCode != resp.StatusCode {
- t.Fatalf("invalid response code: want=%d got=%d", wantCode, resp.StatusCode)
- }
-
- wantBody := []byte("foo")
- if !reflect.DeepEqual(wantBody, body) {
- t.Fatalf("invalid response body: want=%q got=%q", wantBody, body)
- }
-}
-
-func TestSimpleHTTPClientDoError(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.errchan <- errors.New("fixture")
-
- _, _, err := c.Do(context.Background(), &fakeAction{})
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
-}
-
-type nilAction struct{}
-
-func (a *nilAction) HTTPRequest(url.URL) *http.Request {
- return nil
-}
-
-func TestSimpleHTTPClientDoNilRequest(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.errchan <- errors.New("fixture")
-
- _, _, err := c.Do(context.Background(), &nilAction{})
- if err != ErrNoRequest {
- t.Fatalf("expected non-nil error, got nil")
- }
-}
-
-func TestSimpleHTTPClientDoCancelContext(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- tr.startCancel <- struct{}{}
- tr.finishCancel <- struct{}{}
-
- _, _, err := c.Do(context.Background(), &fakeAction{})
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
-}
-
-type checkableReadCloser struct {
- io.ReadCloser
- closed bool
-}
-
-func (c *checkableReadCloser) Close() error {
- if !c.closed {
- c.closed = true
- return c.ReadCloser.Close()
- }
- return nil
-}
-
-func TestSimpleHTTPClientDoCancelContextResponseBodyClosed(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- // create an already-cancelled context
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
-
- body := &checkableReadCloser{ReadCloser: io.NopCloser(strings.NewReader("foo"))}
- go func() {
- // wait that simpleHTTPClient knows the context is already timed out,
- // and calls CancelRequest
- testutil.WaitSchedule()
-
- // response is returned before cancel effects
- tr.respchan <- &http.Response{Body: body}
- }()
-
- _, _, err := c.Do(ctx, &fakeAction{})
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
-
- if !body.closed {
- t.Fatalf("expected closed body")
- }
-}
-
-type blockingBody struct {
- c chan struct{}
-}
-
-func (bb *blockingBody) Read(p []byte) (n int, err error) {
- <-bb.c
- return 0, errors.New("closed")
-}
-
-func (bb *blockingBody) Close() error {
- close(bb.c)
- return nil
-}
-
-func TestSimpleHTTPClientDoCancelContextResponseBodyClosedWithBlockingBody(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- ctx, cancel := context.WithCancel(context.Background())
- body := &checkableReadCloser{ReadCloser: &blockingBody{c: make(chan struct{})}}
- go func() {
- tr.respchan <- &http.Response{Body: body}
- time.Sleep(2 * time.Millisecond)
- // cancel after the body is received
- cancel()
- }()
-
- _, _, err := c.Do(ctx, &fakeAction{})
- if err != context.Canceled {
- t.Fatalf("expected %+v, got %+v", context.Canceled, err)
- }
-
- if !body.closed {
- t.Fatalf("expected closed body")
- }
-}
-
-func TestSimpleHTTPClientDoCancelContextWaitForRoundTrip(t *testing.T) {
- tr := newFakeTransport()
- c := &simpleHTTPClient{transport: tr}
-
- donechan := make(chan struct{})
- ctx, cancel := context.WithCancel(context.Background())
- go func() {
- c.Do(ctx, &fakeAction{})
- close(donechan)
- }()
-
- // This should call CancelRequest and begin the cancellation process
- cancel()
-
- select {
- case <-donechan:
- t.Fatalf("simpleHTTPClient.Do should not have exited yet")
- default:
- }
-
- tr.finishCancel <- struct{}{}
-
- select {
- case <-donechan:
- //expected behavior
- return
- case <-time.After(time.Second):
- t.Fatalf("simpleHTTPClient.Do did not exit within 1s")
- }
-}
-
-func TestSimpleHTTPClientDoHeaderTimeout(t *testing.T) {
- tr := newFakeTransport()
- tr.finishCancel <- struct{}{}
- c := &simpleHTTPClient{transport: tr, headerTimeout: time.Millisecond}
-
- errc := make(chan error, 1)
- go func() {
- _, _, err := c.Do(context.Background(), &fakeAction{})
- errc <- err
- }()
-
- select {
- case err := <-errc:
- if err == nil {
- t.Fatalf("expected non-nil error, got nil")
- }
- case <-time.After(time.Second):
- t.Fatalf("unexpected timeout when waiting for the test to finish")
- }
-}
-
-func TestHTTPClusterClientDo(t *testing.T) {
- fakeErr := errors.New("fake!")
- fakeURL := url.URL{}
- tests := []struct {
- client *httpClusterClient
- ctx context.Context
-
- wantCode int
- wantErr error
- wantPinned int
- }{
- // first good response short-circuits Do
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- {err: fakeErr},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantCode: http.StatusTeapot,
- },
-
- // fall through to good endpoint if err is arbitrary
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {err: fakeErr},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantCode: http.StatusTeapot,
- wantPinned: 1,
- },
-
- // context.Canceled short-circuits Do
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {err: context.Canceled},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantErr: context.Canceled,
- },
-
- // return err if there are no endpoints
- {
- client: &httpClusterClient{
- endpoints: []url.URL{},
- clientFactory: newHTTPClientFactory(nil, nil, 0),
- rand: rand.New(rand.NewSource(0)),
- },
- wantErr: ErrNoEndpoints,
- },
-
- // return err if all endpoints return arbitrary errors
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {err: fakeErr},
- {err: fakeErr},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantErr: &ClusterError{Errors: []error{fakeErr, fakeErr}},
- },
-
- // 500-level errors cause Do to fallthrough to next endpoint
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {resp: http.Response{StatusCode: http.StatusBadGateway}},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- wantCode: http.StatusTeapot,
- wantPinned: 1,
- },
-
- // 500-level errors cause one shot Do to fallthrough to next endpoint
- {
- client: &httpClusterClient{
- endpoints: []url.URL{fakeURL, fakeURL},
- clientFactory: newStaticHTTPClientFactory(
- []staticHTTPResponse{
- {resp: http.Response{StatusCode: http.StatusBadGateway}},
- {resp: http.Response{StatusCode: http.StatusTeapot}},
- },
- ),
- rand: rand.New(rand.NewSource(0)),
- },
- ctx: context.WithValue(context.Background(), &oneShotCtxValue, &oneShotCtxValue),
- wantErr: errors.New("client: etcd member returns server error [Bad Gateway]"),
- wantPinned: 1,
- },
- }
-
- for i, tt := range tests {
- if tt.ctx == nil {
- tt.ctx = context.Background()
- }
- resp, _, err := tt.client.Do(tt.ctx, nil)
- if (tt.wantErr == nil && tt.wantErr != err) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) {
- t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
- continue
- }
-
- if resp == nil {
- if tt.wantCode != 0 {
- t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
- continue
- }
- } else if resp.StatusCode != tt.wantCode {
- t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
- continue
- }
-
- if tt.client.pinned != tt.wantPinned {
- t.Errorf("#%d: pinned=%d, want=%d", i, tt.client.pinned, tt.wantPinned)
- }
- }
-}
-
-func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) {
- fakeURL := url.URL{}
- tr := newFakeTransport()
- tr.finishCancel <- struct{}{}
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
- endpoints: []url.URL{fakeURL},
- }
-
- errc := make(chan error, 1)
- go func() {
- ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
- defer cancel()
- _, _, err := c.Do(ctx, &fakeAction{})
- errc <- err
- }()
-
- select {
- case err := <-errc:
- if err != context.DeadlineExceeded {
- t.Errorf("err = %+v, want %+v", err, context.DeadlineExceeded)
- }
- case <-time.After(time.Second):
-		t.Fatalf("unexpected timeout when waiting for the request to exceed its deadline")
- }
-}
-
-type fakeCancelContext struct{}
-
-var errFakeCancelContext = errors.New("fake context canceled")
-
-func (f fakeCancelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
-func (f fakeCancelContext) Done() <-chan struct{} {
- d := make(chan struct{}, 1)
- d <- struct{}{}
- return d
-}
-func (f fakeCancelContext) Err() error { return errFakeCancelContext }
-func (f fakeCancelContext) Value(key interface{}) interface{} { return 1 }
-
-func withTimeout(parent context.Context, timeout time.Duration) (
- ctx context.Context,
- cancel context.CancelFunc) {
- ctx = parent
- cancel = func() {
- ctx = nil
- }
- return ctx, cancel
-}
-
-func TestHTTPClusterClientDoCanceledContext(t *testing.T) {
- fakeURL := url.URL{}
- tr := newFakeTransport()
- tr.finishCancel <- struct{}{}
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(tr, DefaultCheckRedirect, 0),
- endpoints: []url.URL{fakeURL},
- }
-
- errc := make(chan error, 1)
- go func() {
- ctx, cancel := withTimeout(fakeCancelContext{}, time.Millisecond)
- cancel()
- _, _, err := c.Do(ctx, &fakeAction{})
- errc <- err
- }()
-
- select {
- case err := <-errc:
- if err != errFakeCancelContext {
- t.Errorf("err = %+v, want %+v", err, errFakeCancelContext)
- }
- case <-time.After(time.Second):
-		t.Fatalf("unexpected timeout when waiting for the request to fail with the fake canceled context")
- }
-}
-
-func TestRedirectedHTTPAction(t *testing.T) {
- act := &redirectedHTTPAction{
- action: &staticHTTPAction{
- request: http.Request{
- Method: "DELETE",
- URL: &url.URL{
- Scheme: "https",
- Host: "foo.example.com",
- Path: "/ping",
- },
- },
- },
- location: url.URL{
- Scheme: "https",
- Host: "bar.example.com",
- Path: "/pong",
- },
- }
-
- want := &http.Request{
- Method: "DELETE",
- URL: &url.URL{
- Scheme: "https",
- Host: "bar.example.com",
- Path: "/pong",
- },
- }
- got := act.HTTPRequest(url.URL{Scheme: "http", Host: "baz.example.com", Path: "/pang"})
-
- if !reflect.DeepEqual(want, got) {
-		t.Fatalf("HTTPRequest is %#v, want %#v", got, want)
- }
-}
-
-func TestRedirectFollowingHTTPClient(t *testing.T) {
- tests := []struct {
- checkRedirect CheckRedirectFunc
- client httpClient
- wantCode int
- wantErr error
- }{
- // errors bubbled up
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- err: errors.New("fail!"),
- },
- },
- },
- wantErr: errors.New("fail!"),
- },
-
- // no need to follow redirect if none given
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantCode: http.StatusTeapot,
- },
-
- // redirects if less than max
- {
- checkRedirect: func(via int) error {
- if via >= 2 {
- return ErrTooManyRedirects
- }
- return nil
- },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantCode: http.StatusTeapot,
- },
-
- // succeed after reaching max redirects
- {
- checkRedirect: func(via int) error {
- if via >= 3 {
- return ErrTooManyRedirects
- }
- return nil
- },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantCode: http.StatusTeapot,
- },
-
- // fail if too many redirects
- {
- checkRedirect: func(via int) error {
- if via >= 2 {
- return ErrTooManyRedirects
- }
- return nil
- },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- {
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
- },
- },
- wantErr: ErrTooManyRedirects,
- },
-
- // fail if Location header not set
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- },
- },
- },
- },
- wantErr: errors.New("location header not set"),
- },
-
- // fail if Location header is invalid
- {
- checkRedirect: func(int) error { return ErrTooManyRedirects },
- client: &multiStaticHTTPClient{
- responses: []staticHTTPResponse{
- {
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{":"}},
- },
- },
- },
- },
- wantErr: errors.New("location header not valid URL: :"),
- },
-
- // fail if redirects checked way too many times
- {
- checkRedirect: func(int) error { return nil },
- client: &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusTemporaryRedirect,
- Header: http.Header{"Location": []string{"http://example.com"}},
- },
- },
- wantErr: errTooManyRedirectChecks,
- },
- }
-
- for i, tt := range tests {
- client := &redirectFollowingHTTPClient{client: tt.client, checkRedirect: tt.checkRedirect}
- resp, _, err := client.Do(context.Background(), nil)
- if (tt.wantErr == nil && tt.wantErr != err) || (tt.wantErr != nil && tt.wantErr.Error() != err.Error()) {
- t.Errorf("#%d: got err=%v, want=%v", i, err, tt.wantErr)
- continue
- }
-
- if resp == nil {
- if tt.wantCode != 0 {
- t.Errorf("#%d: resp is nil, want=%d", i, tt.wantCode)
- }
- continue
- }
-
- if resp.StatusCode != tt.wantCode {
- t.Errorf("#%d: resp code=%d, want=%d", i, resp.StatusCode, tt.wantCode)
- continue
- }
- }
-}
-
-func TestDefaultCheckRedirect(t *testing.T) {
- tests := []struct {
- num int
- err error
- }{
- {0, nil},
- {5, nil},
- {10, nil},
- {11, ErrTooManyRedirects},
- {29, ErrTooManyRedirects},
- }
-
- for i, tt := range tests {
- err := DefaultCheckRedirect(tt.num)
- if !reflect.DeepEqual(tt.err, err) {
- t.Errorf("#%d: want=%#v got=%#v", i, tt.err, err)
- }
- }
-}
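
TestDefaultCheckRedirect above pins the built-in policy at ten redirects. Assuming Config exposes a CheckRedirect hook with the same CheckRedirectFunc shape these tests use (an assumption, not confirmed by this diff), a stricter per-client policy would be a minimal sketch like:

	cfg := client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		// Assumed field: cap redirect-following at 2 hops instead of the default 10.
		CheckRedirect: func(via int) error {
			if via > 2 {
				return client.ErrTooManyRedirects
			}
			return nil
		},
	}
	c, err := client.New(cfg)
	if err != nil {
		// handle error
	}
	_ = c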
-
-func TestHTTPClusterClientSync(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- want := []string{"http://127.0.0.1:2379"}
- got := hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got)
- }
-
- err = hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("unexpected error during Sync: %#v", err)
- }
-
- want = []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"}
- got = hc.Endpoints()
- sort.Strings(got)
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints post-Sync: want=%#v got=%#v", want, got)
- }
-
- err = hc.SetEndpoints([]string{"http://127.0.0.1:4009"})
- if err != nil {
- t.Fatalf("unexpected error during reset: %#v", err)
- }
-
- want = []string{"http://127.0.0.1:4009"}
- got = hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints post-reset: want=%#v got=%#v", want, got)
- }
-}
-
-func TestHTTPClusterClientSyncFail(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {err: errors.New("fail!")},
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- want := []string{"http://127.0.0.1:2379"}
- got := hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints: want=%#v got=%#v", want, got)
- }
-
- err = hc.Sync(context.Background())
- if err == nil {
- t.Fatalf("got nil error during Sync")
- }
-
- got = hc.Endpoints()
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("incorrect endpoints after failed Sync: want=%#v got=%#v", want, got)
- }
-}
-
-func TestHTTPClusterClientAutoSyncCancelContext(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
- ctx, cancel := context.WithCancel(context.Background())
- cancel()
-
- err = hc.AutoSync(ctx, time.Hour)
- if err != context.Canceled {
- t.Fatalf("incorrect error value: want=%v got=%v", context.Canceled, err)
- }
-}
-
-func TestHTTPClusterClientAutoSyncFail(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {err: errors.New("fail!")},
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:2379"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- err = hc.AutoSync(context.Background(), time.Hour)
- if !strings.HasPrefix(err.Error(), ErrClusterUnavailable.Error()) {
- t.Fatalf("incorrect error value: want=%v got=%v", ErrClusterUnavailable, err)
- }
-}
-
-func TestHTTPClusterClientGetVersion(t *testing.T) {
- body := []byte(`{"etcdserver":"2.3.2","etcdcluster":"2.3.0"}`)
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Length": []string{"44"}}},
- body: body,
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
-
- actual, err := hc.GetVersion(context.Background())
- if err != nil {
- t.Errorf("non-nil error: %#v", err)
- }
- expected := version.Versions{Server: "2.3.2", Cluster: "2.3.0"}
- if !reflect.DeepEqual(&expected, actual) {
- t.Errorf("incorrect Response: want=%#v got=%#v", expected, actual)
- }
-}
-
-// TestHTTPClusterClientSyncPinEndpoint tests that Sync() pins the endpoint when
-// it gets the exactly same member list as before.
-func TestHTTPClusterClientSyncPinEndpoint(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
- pinnedEndpoint := hc.endpoints[hc.pinned]
-
- for i := 0; i < 3; i++ {
- err = hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
- }
-
- if g := hc.endpoints[hc.pinned]; g != pinnedEndpoint {
- t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, pinnedEndpoint)
- }
- }
-}
-
-// TestHTTPClusterClientSyncUnpinEndpoint tests that Sync() unpins the endpoint when
-// it gets a different member list than before.
-func TestHTTPClusterClientSyncUnpinEndpoint(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- }
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4003", "http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"})
- if err != nil {
- t.Fatalf("unexpected error during setup: %#v", err)
- }
- wants := []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001", "http://127.0.0.1:4002"}
-
- for i := 0; i < 3; i++ {
- err = hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
- }
-
- if g := hc.endpoints[hc.pinned]; g.String() != wants[i] {
- t.Errorf("#%d: pinned endpoint = %v, want %v", i, g, wants[i])
- }
- }
-}
-
-// TestHTTPClusterClientSyncPinLeaderEndpoint tests that Sync() pins the leader
-// when the selection mode is EndpointSelectionPrioritizeLeader
-func TestHTTPClusterClientSyncPinLeaderEndpoint(t *testing.T) {
- cf := newStaticHTTPClientFactory([]staticHTTPResponse{
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- {
- resp: http.Response{StatusCode: http.StatusOK, Header: http.Header{"Content-Type": []string{"application/json"}}},
- body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}`),
- },
- })
-
- hc := &httpClusterClient{
- clientFactory: cf,
- rand: rand.New(rand.NewSource(0)),
- selectionMode: EndpointSelectionPrioritizeLeader,
- endpoints: []url.URL{{}}, // Need somewhere to pretend to send to initially
- }
-
- wants := []string{"http://127.0.0.1:4003", "http://127.0.0.1:4002"}
-
- for i, want := range wants {
- err := hc.Sync(context.Background())
- if err != nil {
- t.Fatalf("#%d: unexpected error during Sync: %#v", i, err)
- }
-
- pinned := hc.endpoints[hc.pinned].String()
- if pinned != want {
- t.Errorf("#%d: pinned endpoint = %v, want %v", i, pinned, want)
- }
- }
-}
-
-func TestHTTPClusterClientResetFail(t *testing.T) {
- tests := [][]string{
- // need at least one endpoint
- {},
-
- // urls must be valid
- {":"},
- }
-
- for i, tt := range tests {
- hc := &httpClusterClient{rand: rand.New(rand.NewSource(0))}
- err := hc.SetEndpoints(tt)
- if err == nil {
- t.Errorf("#%d: expected non-nil error", i)
- }
- }
-}
-
-func TestHTTPClusterClientResetPinRandom(t *testing.T) {
- round := 2000
- pinNum := 0
- for i := 0; i < round; i++ {
- hc := &httpClusterClient{rand: rand.New(rand.NewSource(int64(i)))}
- err := hc.SetEndpoints([]string{"http://127.0.0.1:4001", "http://127.0.0.1:4002", "http://127.0.0.1:4003"})
- if err != nil {
- t.Fatalf("#%d: reset error (%v)", i, err)
- }
- if hc.endpoints[hc.pinned].String() == "http://127.0.0.1:4001" {
- pinNum++
- }
- }
-
- min := 1.0/3.0 - 0.05
- max := 1.0/3.0 + 0.05
- if ratio := float64(pinNum) / float64(round); ratio > max || ratio < min {
- t.Errorf("pinned ratio = %v, want [%v, %v]", ratio, min, max)
- }
-}
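
The tests above drive the unexported httpClusterClient directly. A hedged sketch of reaching the same pinning and syncing behaviour through the public surface shown in doc.go further down (endpoints are placeholders; the exported SelectionMode field and AutoSync method are assumed to mirror the unexported ones exercised here):

	package main

	import (
		"context"
		"log"
		"time"

		client "go.etcd.io/etcd/client/v2"
	)

	func main() {
		cfg := client.Config{
			Endpoints:     []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"},
			SelectionMode: client.EndpointSelectionPrioritizeLeader, // pin the leader, as TestHTTPClusterClientSyncPinLeaderEndpoint expects
		}
		c, err := client.New(cfg)
		if err != nil {
			log.Fatal(err)
		}

		// Refresh the endpoint list from the members API in the background,
		// mirroring what the Sync/AutoSync tests above drive by hand.
		go func() {
			if err := c.AutoSync(context.Background(), 10*time.Second); err != nil {
				log.Println(err) // context.Canceled on shutdown, an ErrClusterUnavailable-prefixed error otherwise
			}
		}()

		log.Println("current endpoints:", c.Endpoints())
	}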
diff --git a/client/v2/curl.go b/client/v2/curl.go
deleted file mode 100644
index 8d12367541e..00000000000
--- a/client/v2/curl.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "os"
-)
-
-var (
- cURLDebug = false
-)
-
-func EnablecURLDebug() {
- cURLDebug = true
-}
-
-func DisablecURLDebug() {
- cURLDebug = false
-}
-
-// printcURL prints the cURL equivalent request to stderr.
-// It returns an error if the body of the request cannot
-// be read.
-// The caller MUST cancel the request if there is an error.
-func printcURL(req *http.Request) error {
- if !cURLDebug {
- return nil
- }
- var (
- command string
- b []byte
- err error
- )
-
- if req.URL != nil {
- command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
- }
-
- if req.Body != nil {
- b, err = io.ReadAll(req.Body)
- if err != nil {
- return err
- }
- command += fmt.Sprintf(" -d %q", string(b))
- }
-
- fmt.Fprintf(os.Stderr, "cURL Command: %q\n", command)
-
- // reset body
- body := bytes.NewBuffer(b)
- req.Body = io.NopCloser(body)
-
- return nil
-}
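
printcURL itself is unexported, so from outside the package the switch is EnablecURLDebug/DisablecURLDebug. A minimal sketch of using it around a call (kAPI as constructed in doc.go below, and assuming, as the flag implies, that the client runs printcURL on each outgoing request):

	client.EnablecURLDebug()
	defer client.DisablecURLDebug()

	// While the flag is on, each request also writes a line such as
	//   cURL Command: "curl -X PUT http://127.0.0.1:2379/v2/keys/foo -d \"value=bar\""
	// to stderr, which makes it easy to replay a failing call by hand.
	_, err := kAPI.Set(context.Background(), "/foo", "bar", nil)
	if err != nil {
		// handle error
	}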
diff --git a/client/v2/discover.go b/client/v2/discover.go
deleted file mode 100644
index 646ba5dada7..00000000000
--- a/client/v2/discover.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "go.etcd.io/etcd/client/pkg/v3/srv"
-)
-
-// Discoverer is an interface that wraps the Discover method.
-type Discoverer interface {
- // Discover looks up the etcd servers for the domain.
- Discover(domain string, serviceName string) ([]string, error)
-}
-
-type srvDiscover struct{}
-
-// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
-func NewSRVDiscover() Discoverer {
- return &srvDiscover{}
-}
-
-func (d *srvDiscover) Discover(domain string, serviceName string) ([]string, error) {
- srvs, err := srv.GetClient("etcd-client", domain, serviceName)
- if err != nil {
- return nil, err
- }
- return srvs.Endpoints, nil
-}
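
A minimal sketch of feeding SRV discovery into the client constructor shown in doc.go below (the domain is a placeholder):

	package main

	import (
		"log"

		client "go.etcd.io/etcd/client/v2"
	)

	func main() {
		// Resolve client endpoints from DNS SRV records.
		d := client.NewSRVDiscover()
		eps, err := d.Discover("example.com", "")
		if err != nil {
			log.Fatal(err)
		}

		// Hand the discovered endpoints to an ordinary v2 client.
		c, err := client.New(client.Config{
			Endpoints: eps,
			Transport: client.DefaultTransport,
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Println("using endpoints:", c.Endpoints())
	}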
diff --git a/client/v2/doc.go b/client/v2/doc.go
deleted file mode 100644
index 68284c20a89..00000000000
--- a/client/v2/doc.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package client provides bindings for the etcd APIs.
-
-Create a Config and exchange it for a Client:
-
- import (
- "net/http"
- "context"
-
- "go.etcd.io/etcd/client/v2"
- )
-
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: DefaultTransport,
- }
-
- c, err := client.New(cfg)
- if err != nil {
- // handle error
- }
-
-Clients are safe for concurrent use by multiple goroutines.
-
-Create a KeysAPI using the Client, then use it to interact with etcd:
-
- kAPI := client.NewKeysAPI(c)
-
- // create a new key /foo with the value "bar"
- _, err = kAPI.Create(context.Background(), "/foo", "bar")
- if err != nil {
- // handle error
- }
-
- // delete the newly created key only if the value is still "bar"
- _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
- if err != nil {
- // handle error
- }
-
-Use a custom context to set timeouts on your operations:
-
- import "time"
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- // set a new key, ignoring its previous state
- _, err := kAPI.Set(ctx, "/ping", "pong", nil)
- if err != nil {
- if err == context.DeadlineExceeded {
- // request took longer than 5s
- } else {
- // handle error
- }
- }
-*/
-package client
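
The package documentation above stops at Set and Delete; the watch path that keys_test.go exercises further down (Watcher plus Next) would look roughly like this, in the same style as the snippets above:

	w := kAPI.Watcher("/foo", &client.WatcherOptions{Recursive: true})
	for {
		resp, err := w.Next(context.Background())
		if err != nil {
			// handle error (the watch index may have been cleared, etc.)
			break
		}
		// resp.Node carries the new state, resp.PrevNode the previous one
		fmt.Println(resp.Action, resp.Node.Key, resp.Node.Value)
	}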
diff --git a/client/v2/fake_transport_test.go b/client/v2/fake_transport_test.go
deleted file mode 100644
index 7a725c17cb8..00000000000
--- a/client/v2/fake_transport_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "errors"
- "net/http"
-)
-
-func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- select {
- case resp := <-t.respchan:
- return resp, nil
- case err := <-t.errchan:
- return nil, err
- case <-t.startCancel:
- case <-req.Cancel:
- }
- select {
-	// this simulates the request finishing before the cancellation takes effect
- case resp := <-t.respchan:
- return resp, nil
- // wait on finishCancel to simulate taking some amount of
- // time while calling CancelRequest
- case <-t.finishCancel:
- return nil, errors.New("cancelled")
- }
-}
diff --git a/client/v2/go.mod b/client/v2/go.mod
deleted file mode 100644
index 4adaae9330f..00000000000
--- a/client/v2/go.mod
+++ /dev/null
@@ -1,32 +0,0 @@
-module go.etcd.io/etcd/client/v2
-
-go 1.19
-
-require (
- go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
- sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6
-)
-
-require (
- github.com/coreos/go-semver v0.3.1 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/stretchr/testify v1.8.1 // indirect
- gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
-)
-
-replace (
- go.etcd.io/etcd/api/v3 => ../../api
- go.etcd.io/etcd/client/pkg/v3 => ../pkg
-)
-
-// Bad imports sometimes cause attempts to pull in that code.
-// This makes the error more explicit.
-replace (
- go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/pkg/v3 => ./FORBIDDED_DEPENDENCY
- go.etcd.io/etcd/tests/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
-)
diff --git a/client/v2/go.sum b/client/v2/go.sum
deleted file mode 100644
index e18a9825d55..00000000000
--- a/client/v2/go.sum
+++ /dev/null
@@ -1,27 +0,0 @@
-github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
-github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
diff --git a/client/v2/keys_bench_test.go b/client/v2/keys_bench_test.go
deleted file mode 100644
index ff136033452..00000000000
--- a/client/v2/keys_bench_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "encoding/json"
- "net/http"
- "reflect"
- "strings"
- "testing"
-)
-
-func createTestNode(size int) *Node {
- return &Node{
- Key: strings.Repeat("a", 30),
- Value: strings.Repeat("a", size),
- CreatedIndex: 123456,
- ModifiedIndex: 123456,
- TTL: 123456789,
- }
-}
-
-func createTestNodeWithChildren(children, size int) *Node {
- node := createTestNode(size)
- for i := 0; i < children; i++ {
- node.Nodes = append(node.Nodes, createTestNode(size))
- }
- return node
-}
-
-func createTestResponse(children, size int) *Response {
- return &Response{
- Action: "aaaaa",
- Node: createTestNodeWithChildren(children, size),
- PrevNode: nil,
- }
-}
-
-func benchmarkResponseUnmarshalling(b *testing.B, children, size int) {
- header := http.Header{}
- header.Add("X-Etcd-Index", "123456")
- response := createTestResponse(children, size)
- body, err := json.Marshal(response)
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- newResponse := new(Response)
- for i := 0; i < b.N; i++ {
- if newResponse, err = unmarshalSuccessfulKeysResponse(header, body); err != nil {
- b.Errorf("error unmarshalling response (%v)", err)
- }
-
- }
- if !reflect.DeepEqual(response.Node, newResponse.Node) {
- b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse)
- }
-}
-
-func BenchmarkSmallResponseUnmarshal(b *testing.B) {
- benchmarkResponseUnmarshalling(b, 30, 20)
-}
-
-func BenchmarkManySmallResponseUnmarshal(b *testing.B) {
- benchmarkResponseUnmarshalling(b, 3000, 20)
-}
-
-func BenchmarkMediumResponseUnmarshal(b *testing.B) {
- benchmarkResponseUnmarshalling(b, 300, 200)
-}
-
-func BenchmarkLargeResponseUnmarshal(b *testing.B) {
- benchmarkResponseUnmarshalling(b, 3000, 2000)
-}
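
These benchmarks compare JSON decoding cost across responses of increasing size and child count. Assuming the module layout above, they can be run with

	go test -run='^$' -bench=ResponseUnmarshal -benchmem ./client/v2/

where -benchmem also reports the allocation overhead the larger cases are built to expose.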
diff --git a/client/v2/keys_test.go b/client/v2/keys_test.go
deleted file mode 100644
index 05aeb3f7e10..00000000000
--- a/client/v2/keys_test.go
+++ /dev/null
@@ -1,1429 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "reflect"
- "testing"
- "time"
-)
-
-func TestV2KeysURLHelper(t *testing.T) {
- tests := []struct {
- endpoint url.URL
- prefix string
- key string
- want url.URL
- }{
- // key is empty, no problem
- {
- endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"},
- prefix: "",
- key: "",
- want: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"},
- },
-
- // key is joined to path
- {
- endpoint: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"},
- prefix: "",
- key: "/foo/bar",
- want: url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys/foo/bar"},
- },
-
- // key is joined to path when path is empty
- {
- endpoint: url.URL{Scheme: "http", Host: "example.com", Path: ""},
- prefix: "",
- key: "/foo/bar",
- want: url.URL{Scheme: "http", Host: "example.com", Path: "/foo/bar"},
- },
-
- // Host field carries through with port
- {
- endpoint: url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"},
- prefix: "",
- key: "",
- want: url.URL{Scheme: "http", Host: "example.com:8080", Path: "/v2/keys"},
- },
-
- // Scheme carries through
- {
- endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"},
- prefix: "",
- key: "",
- want: url.URL{Scheme: "https", Host: "example.com", Path: "/v2/keys"},
- },
- // Prefix is applied
- {
- endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"},
- prefix: "/bar",
- key: "/baz",
- want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz"},
- },
- // Prefix is joined to path
- {
- endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"},
- prefix: "/bar",
- key: "",
- want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar"},
- },
- // Keep trailing slash
- {
- endpoint: url.URL{Scheme: "https", Host: "example.com", Path: "/foo"},
- prefix: "/bar",
- key: "/baz/",
- want: url.URL{Scheme: "https", Host: "example.com", Path: "/foo/bar/baz/"},
- },
- }
-
- for i, tt := range tests {
- got := v2KeysURL(tt.endpoint, tt.prefix, tt.key)
- if tt.want != *got {
- t.Errorf("#%d: want=%#v, got=%#v", i, tt.want, *got)
- }
- }
-}
-
-func TestGetAction(t *testing.T) {
- ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}
- baseWantURL := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/v2/keys/foo/bar",
- }
- wantHeader := http.Header{}
-
- tests := []struct {
- recursive bool
- sorted bool
- quorum bool
- wantQuery string
- }{
- {
- recursive: false,
- sorted: false,
- quorum: false,
- wantQuery: "quorum=false&recursive=false&sorted=false",
- },
- {
- recursive: true,
- sorted: false,
- quorum: false,
- wantQuery: "quorum=false&recursive=true&sorted=false",
- },
- {
- recursive: false,
- sorted: true,
- quorum: false,
- wantQuery: "quorum=false&recursive=false&sorted=true",
- },
- {
- recursive: true,
- sorted: true,
- quorum: false,
- wantQuery: "quorum=false&recursive=true&sorted=true",
- },
- {
- recursive: false,
- sorted: false,
- quorum: true,
- wantQuery: "quorum=true&recursive=false&sorted=false",
- },
- }
-
- for i, tt := range tests {
- f := getAction{
- Key: "/foo/bar",
- Recursive: tt.recursive,
- Sorted: tt.sorted,
- Quorum: tt.quorum,
- }
- got := *f.HTTPRequest(ep)
-
- wantURL := baseWantURL
- wantURL.RawQuery = tt.wantQuery
-
- err := assertRequest(got, "GET", wantURL, wantHeader, nil)
- if err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestWaitAction(t *testing.T) {
- ep := url.URL{Scheme: "http", Host: "example.com", Path: "/v2/keys"}
- baseWantURL := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/v2/keys/foo/bar",
- }
- wantHeader := http.Header{}
-
- tests := []struct {
- waitIndex uint64
- recursive bool
- wantQuery string
- }{
- {
- recursive: false,
- waitIndex: uint64(0),
- wantQuery: "recursive=false&wait=true&waitIndex=0",
- },
- {
- recursive: false,
- waitIndex: uint64(12),
- wantQuery: "recursive=false&wait=true&waitIndex=12",
- },
- {
- recursive: true,
- waitIndex: uint64(12),
- wantQuery: "recursive=true&wait=true&waitIndex=12",
- },
- }
-
- for i, tt := range tests {
- f := waitAction{
- Key: "/foo/bar",
- WaitIndex: tt.waitIndex,
- Recursive: tt.recursive,
- }
- got := *f.HTTPRequest(ep)
-
- wantURL := baseWantURL
- wantURL.RawQuery = tt.wantQuery
-
- err := assertRequest(got, "GET", wantURL, wantHeader, nil)
- if err != nil {
- t.Errorf("#%d: unexpected error: %#v", i, err)
- }
- }
-}
-
-func TestSetAction(t *testing.T) {
- wantHeader := http.Header(map[string][]string{
- "Content-Type": {"application/x-www-form-urlencoded"},
- })
-
- tests := []struct {
- act setAction
- wantURL string
- wantBody string
- }{
- // default prefix
- {
- act: setAction{
- Prefix: defaultV2KeysPrefix,
- Key: "foo",
- },
- wantURL: "http://example.com/v2/keys/foo",
- wantBody: "value=",
- },
-
- // non-default prefix
- {
- act: setAction{
- Prefix: "/pfx",
- Key: "foo",
- },
- wantURL: "http://example.com/pfx/foo",
- wantBody: "value=",
- },
-
- // no prefix
- {
- act: setAction{
- Key: "foo",
- },
- wantURL: "http://example.com/foo",
- wantBody: "value=",
- },
-
- // Key with path separators
- {
- act: setAction{
- Prefix: defaultV2KeysPrefix,
- Key: "foo/bar/baz",
- },
- wantURL: "http://example.com/v2/keys/foo/bar/baz",
- wantBody: "value=",
- },
-
- // Key with leading slash, Prefix with trailing slash
- {
- act: setAction{
- Prefix: "/foo/",
- Key: "/bar",
- },
- wantURL: "http://example.com/foo/bar",
- wantBody: "value=",
- },
-
- // Key with trailing slash
- {
- act: setAction{
- Key: "/foo/",
- },
- wantURL: "http://example.com/foo/",
- wantBody: "value=",
- },
-
- // Value is set
- {
- act: setAction{
- Key: "foo",
- Value: "baz",
- },
- wantURL: "http://example.com/foo",
- wantBody: "value=baz",
- },
-
- // PrevExist set, but still ignored
- {
- act: setAction{
- Key: "foo",
- PrevExist: PrevIgnore,
- },
- wantURL: "http://example.com/foo",
- wantBody: "value=",
- },
-
- // PrevExist set to true
- {
- act: setAction{
- Key: "foo",
- PrevExist: PrevExist,
- },
- wantURL: "http://example.com/foo?prevExist=true",
- wantBody: "value=",
- },
-
- // PrevExist set to false
- {
- act: setAction{
- Key: "foo",
- PrevExist: PrevNoExist,
- },
- wantURL: "http://example.com/foo?prevExist=false",
- wantBody: "value=",
- },
-
- // PrevValue is urlencoded
- {
- act: setAction{
- Key: "foo",
- PrevValue: "bar baz",
- },
- wantURL: "http://example.com/foo?prevValue=bar+baz",
- wantBody: "value=",
- },
-
- // PrevIndex is set
- {
- act: setAction{
- Key: "foo",
- PrevIndex: uint64(12),
- },
- wantURL: "http://example.com/foo?prevIndex=12",
- wantBody: "value=",
- },
-
- // TTL is set
- {
- act: setAction{
- Key: "foo",
- TTL: 3 * time.Minute,
- },
- wantURL: "http://example.com/foo",
- wantBody: "ttl=180&value=",
- },
-
- // Refresh is set
- {
- act: setAction{
- Key: "foo",
- TTL: 3 * time.Minute,
- Refresh: true,
- },
- wantURL: "http://example.com/foo",
- wantBody: "refresh=true&ttl=180&value=",
- },
-
- // Dir is set
- {
- act: setAction{
- Key: "foo",
- Dir: true,
- },
- wantURL: "http://example.com/foo?dir=true",
- wantBody: "",
- },
- // Dir is set with a value
- {
- act: setAction{
- Key: "foo",
- Value: "bar",
- Dir: true,
- },
- wantURL: "http://example.com/foo?dir=true",
- wantBody: "",
- },
- // Dir is set with PrevExist set to true
- {
- act: setAction{
- Key: "foo",
- PrevExist: PrevExist,
- Dir: true,
- },
- wantURL: "http://example.com/foo?dir=true&prevExist=true",
- wantBody: "",
- },
- // Dir is set with PrevValue
- {
- act: setAction{
- Key: "foo",
- PrevValue: "bar",
- Dir: true,
- },
- wantURL: "http://example.com/foo?dir=true",
- wantBody: "",
- },
- // NoValueOnSuccess is set
- {
- act: setAction{
- Key: "foo",
- NoValueOnSuccess: true,
- },
- wantURL: "http://example.com/foo?noValueOnSuccess=true",
- wantBody: "value=",
- },
- }
-
- for i, tt := range tests {
- u, err := url.Parse(tt.wantURL)
- if err != nil {
- t.Errorf("#%d: unable to use wantURL fixture: %v", i, err)
- }
-
- got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"})
- if err := assertRequest(*got, "PUT", u, wantHeader, []byte(tt.wantBody)); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestCreateInOrderAction(t *testing.T) {
- wantHeader := http.Header(map[string][]string{
- "Content-Type": {"application/x-www-form-urlencoded"},
- })
-
- tests := []struct {
- act createInOrderAction
- wantURL string
- wantBody string
- }{
- // default prefix
- {
- act: createInOrderAction{
- Prefix: defaultV2KeysPrefix,
- Dir: "foo",
- },
- wantURL: "http://example.com/v2/keys/foo",
- wantBody: "value=",
- },
-
- // non-default prefix
- {
- act: createInOrderAction{
- Prefix: "/pfx",
- Dir: "foo",
- },
- wantURL: "http://example.com/pfx/foo",
- wantBody: "value=",
- },
-
- // no prefix
- {
- act: createInOrderAction{
- Dir: "foo",
- },
- wantURL: "http://example.com/foo",
- wantBody: "value=",
- },
-
- // Key with path separators
- {
- act: createInOrderAction{
- Prefix: defaultV2KeysPrefix,
- Dir: "foo/bar/baz",
- },
- wantURL: "http://example.com/v2/keys/foo/bar/baz",
- wantBody: "value=",
- },
-
- // Key with leading slash, Prefix with trailing slash
- {
- act: createInOrderAction{
- Prefix: "/foo/",
- Dir: "/bar",
- },
- wantURL: "http://example.com/foo/bar",
- wantBody: "value=",
- },
-
- // Key with trailing slash
- {
- act: createInOrderAction{
- Dir: "/foo/",
- },
- wantURL: "http://example.com/foo/",
- wantBody: "value=",
- },
-
- // Value is set
- {
- act: createInOrderAction{
- Dir: "foo",
- Value: "baz",
- },
- wantURL: "http://example.com/foo",
- wantBody: "value=baz",
- },
- // TTL is set
- {
- act: createInOrderAction{
- Dir: "foo",
- TTL: 3 * time.Minute,
- },
- wantURL: "http://example.com/foo",
- wantBody: "ttl=180&value=",
- },
- }
-
- for i, tt := range tests {
- u, err := url.Parse(tt.wantURL)
- if err != nil {
- t.Errorf("#%d: unable to use wantURL fixture: %v", i, err)
- }
-
- got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"})
- if err := assertRequest(*got, "POST", u, wantHeader, []byte(tt.wantBody)); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func TestDeleteAction(t *testing.T) {
- wantHeader := http.Header(map[string][]string{
- "Content-Type": {"application/x-www-form-urlencoded"},
- })
-
- tests := []struct {
- act deleteAction
- wantURL string
- }{
- // default prefix
- {
- act: deleteAction{
- Prefix: defaultV2KeysPrefix,
- Key: "foo",
- },
- wantURL: "http://example.com/v2/keys/foo",
- },
-
- // non-default prefix
- {
- act: deleteAction{
- Prefix: "/pfx",
- Key: "foo",
- },
- wantURL: "http://example.com/pfx/foo",
- },
-
- // no prefix
- {
- act: deleteAction{
- Key: "foo",
- },
- wantURL: "http://example.com/foo",
- },
-
- // Key with path separators
- {
- act: deleteAction{
- Prefix: defaultV2KeysPrefix,
- Key: "foo/bar/baz",
- },
- wantURL: "http://example.com/v2/keys/foo/bar/baz",
- },
-
- // Key with leading slash, Prefix with trailing slash
- {
- act: deleteAction{
- Prefix: "/foo/",
- Key: "/bar",
- },
- wantURL: "http://example.com/foo/bar",
- },
-
- // Key with trailing slash
- {
- act: deleteAction{
- Key: "/foo/",
- },
- wantURL: "http://example.com/foo/",
- },
-
- // Recursive set to true
- {
- act: deleteAction{
- Key: "foo",
- Recursive: true,
- },
- wantURL: "http://example.com/foo?recursive=true",
- },
-
- // PrevValue is urlencoded
- {
- act: deleteAction{
- Key: "foo",
- PrevValue: "bar baz",
- },
- wantURL: "http://example.com/foo?prevValue=bar+baz",
- },
-
- // PrevIndex is set
- {
- act: deleteAction{
- Key: "foo",
- PrevIndex: uint64(12),
- },
- wantURL: "http://example.com/foo?prevIndex=12",
- },
- }
-
- for i, tt := range tests {
- u, err := url.Parse(tt.wantURL)
- if err != nil {
- t.Errorf("#%d: unable to use wantURL fixture: %v", i, err)
- }
-
- got := tt.act.HTTPRequest(url.URL{Scheme: "http", Host: "example.com"})
- if err := assertRequest(*got, "DELETE", u, wantHeader, nil); err != nil {
- t.Errorf("#%d: %v", i, err)
- }
- }
-}
-
-func assertRequest(got http.Request, wantMethod string, wantURL *url.URL, wantHeader http.Header, wantBody []byte) error {
- if wantMethod != got.Method {
- return fmt.Errorf("want.Method=%#v got.Method=%#v", wantMethod, got.Method)
- }
-
- if !reflect.DeepEqual(wantURL, got.URL) {
- return fmt.Errorf("want.URL=%#v got.URL=%#v", wantURL, got.URL)
- }
-
- if !reflect.DeepEqual(wantHeader, got.Header) {
- return fmt.Errorf("want.Header=%#v got.Header=%#v", wantHeader, got.Header)
- }
-
- if got.Body == nil {
- if wantBody != nil {
- return fmt.Errorf("want.Body=%v got.Body=%v", wantBody, got.Body)
- }
- } else {
- if wantBody == nil {
- return fmt.Errorf("want.Body=%v got.Body=%s", wantBody, got.Body)
- }
- gotBytes, err := io.ReadAll(got.Body)
- if err != nil {
- return err
- }
-
- if !reflect.DeepEqual(wantBody, gotBytes) {
- return fmt.Errorf("want.Body=%s got.Body=%s", wantBody, gotBytes)
- }
- }
-
- return nil
-}
-
-func TestUnmarshalSuccessfulResponse(t *testing.T) {
- var expiration time.Time
- expiration.UnmarshalText([]byte("2015-04-07T04:40:23.044979686Z"))
-
- tests := []struct {
- indexHdr string
- clusterIDHdr string
- body string
- wantRes *Response
- wantErr bool
- }{
- // Neither PrevNode or Node
- {
- indexHdr: "1",
- body: `{"action":"delete"}`,
- wantRes: &Response{Action: "delete", Index: 1},
- wantErr: false,
- },
-
- // PrevNode
- {
- indexHdr: "15",
- body: `{"action":"delete", "prevNode": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
- wantRes: &Response{
- Action: "delete",
- Index: 15,
- Node: nil,
- PrevNode: &Node{
- Key: "/foo",
- Value: "bar",
- ModifiedIndex: 12,
- CreatedIndex: 10,
- },
- },
- wantErr: false,
- },
-
- // Node
- {
- indexHdr: "15",
- body: `{"action":"get", "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10, "ttl": 10, "expiration": "2015-04-07T04:40:23.044979686Z"}}`,
- wantRes: &Response{
- Action: "get",
- Index: 15,
- Node: &Node{
- Key: "/foo",
- Value: "bar",
- ModifiedIndex: 12,
- CreatedIndex: 10,
- TTL: 10,
- Expiration: &expiration,
- },
- PrevNode: nil,
- },
- wantErr: false,
- },
-
- // Node Dir
- {
- indexHdr: "15",
- clusterIDHdr: "abcdef",
- body: `{"action":"get", "node": {"key": "/foo", "dir": true, "modifiedIndex": 12, "createdIndex": 10}}`,
- wantRes: &Response{
- Action: "get",
- Index: 15,
- Node: &Node{
- Key: "/foo",
- Dir: true,
- ModifiedIndex: 12,
- CreatedIndex: 10,
- },
- PrevNode: nil,
- ClusterID: "abcdef",
- },
- wantErr: false,
- },
-
- // PrevNode and Node
- {
- indexHdr: "15",
- body: `{"action":"update", "prevNode": {"key": "/foo", "value": "baz", "modifiedIndex": 10, "createdIndex": 10}, "node": {"key": "/foo", "value": "bar", "modifiedIndex": 12, "createdIndex": 10}}`,
- wantRes: &Response{
- Action: "update",
- Index: 15,
- PrevNode: &Node{
- Key: "/foo",
- Value: "baz",
- ModifiedIndex: 10,
- CreatedIndex: 10,
- },
- Node: &Node{
- Key: "/foo",
- Value: "bar",
- ModifiedIndex: 12,
- CreatedIndex: 10,
- },
- },
- wantErr: false,
- },
-
- // Garbage in body
- {
- indexHdr: "",
- body: `garbage`,
- wantRes: nil,
- wantErr: true,
- },
-
- // non-integer index
- {
- indexHdr: "poo",
- body: `{}`,
- wantRes: nil,
- wantErr: true,
- },
- }
-
- for i, tt := range tests {
- h := make(http.Header)
- h.Add("X-Etcd-Index", tt.indexHdr)
- res, err := unmarshalSuccessfulKeysResponse(h, []byte(tt.body))
- if tt.wantErr != (err != nil) {
- t.Errorf("#%d: wantErr=%t, err=%v", i, tt.wantErr, err)
- }
-
- if (res == nil) != (tt.wantRes == nil) {
- t.Errorf("#%d: received res=%#v, but expected res=%#v", i, res, tt.wantRes)
- continue
- } else if tt.wantRes == nil {
- // expected and successfully got nil response
- continue
- }
-
- if res.Action != tt.wantRes.Action {
- t.Errorf("#%d: Action=%s, expected %s", i, res.Action, tt.wantRes.Action)
- }
- if res.Index != tt.wantRes.Index {
- t.Errorf("#%d: Index=%d, expected %d", i, res.Index, tt.wantRes.Index)
- }
- if !reflect.DeepEqual(res.Node, tt.wantRes.Node) {
- t.Errorf("#%d: Node=%v, expected %v", i, res.Node, tt.wantRes.Node)
- }
- }
-}
-
-func TestUnmarshalFailedKeysResponse(t *testing.T) {
- body := []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`)
-
- wantErr := Error{
- Code: 100,
- Message: "Key not found",
- Cause: "/foo",
- Index: uint64(18),
- }
-
- gotErr := unmarshalFailedKeysResponse(body)
- if !reflect.DeepEqual(wantErr, gotErr) {
- t.Errorf("unexpected error: want=%#v got=%#v", wantErr, gotErr)
- }
-}
-
-func TestUnmarshalFailedKeysResponseBadJSON(t *testing.T) {
- err := unmarshalFailedKeysResponse([]byte(`{"er`))
- if err == nil {
- t.Errorf("got nil error")
- } else if _, ok := err.(Error); ok {
- t.Errorf("error is of incorrect type *Error: %#v", err)
- }
-}
-
-func TestHTTPWatcherNextWaitAction(t *testing.T) {
- initAction := waitAction{
- Prefix: "/pants",
- Key: "/foo/bar",
- Recursive: true,
- WaitIndex: 19,
- }
-
- client := &actionAssertingHTTPClient{
- t: t,
- act: &initAction,
- resp: http.Response{
- StatusCode: http.StatusOK,
- Header: http.Header{"X-Etcd-Index": []string{"42"}},
- },
- body: []byte(`{"action":"update","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`),
- }
-
- wantResponse := &Response{
- Action: "update",
- Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(21)},
- PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)},
- Index: uint64(42),
- }
-
- wantNextWait := waitAction{
- Prefix: "/pants",
- Key: "/foo/bar",
- Recursive: true,
- WaitIndex: 22,
- }
-
- watcher := &httpWatcher{
- client: client,
- nextWait: initAction,
- }
-
- resp, err := watcher.Next(context.Background())
- if err != nil {
- t.Errorf("non-nil error: %#v", err)
- }
-
- if !reflect.DeepEqual(wantResponse, resp) {
- t.Errorf("received incorrect Response: want=%#v got=%#v", wantResponse, resp)
- }
-
- if !reflect.DeepEqual(wantNextWait, watcher.nextWait) {
- t.Errorf("nextWait incorrect: want=%#v got=%#v", wantNextWait, watcher.nextWait)
- }
-}
-
-func TestHTTPWatcherNextFail(t *testing.T) {
- tests := []httpClient{
- // generic HTTP client failure
- &staticHTTPClient{
- err: errors.New("fail!"),
- },
-
- // unusable status code
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
-
- // etcd Error response
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusNotFound,
- },
- body: []byte(`{"errorCode":100,"message":"Key not found","cause":"/foo","index":18}`),
- },
- }
-
- for i, tt := range tests {
- act := waitAction{
- Prefix: "/pants",
- Key: "/foo/bar",
- Recursive: true,
- WaitIndex: 19,
- }
-
- watcher := &httpWatcher{
- client: tt,
- nextWait: act,
- }
-
- resp, err := watcher.Next(context.Background())
- if err == nil {
- t.Errorf("#%d: expected non-nil error", i)
- }
- if resp != nil {
- t.Errorf("#%d: expected nil Response, got %#v", i, resp)
- }
- if !reflect.DeepEqual(act, watcher.nextWait) {
- t.Errorf("#%d: nextWait changed: want=%#v got=%#v", i, act, watcher.nextWait)
- }
- }
-}
-
-func TestHTTPKeysAPIWatcherAction(t *testing.T) {
- tests := []struct {
- key string
- opts *WatcherOptions
- want waitAction
- }{
- {
- key: "/foo",
- opts: nil,
- want: waitAction{
- Key: "/foo",
- Recursive: false,
- WaitIndex: 0,
- },
- },
-
- {
- key: "/foo",
- opts: &WatcherOptions{
- Recursive: false,
- AfterIndex: 0,
- },
- want: waitAction{
- Key: "/foo",
- Recursive: false,
- WaitIndex: 0,
- },
- },
-
- {
- key: "/foo",
- opts: &WatcherOptions{
- Recursive: true,
- AfterIndex: 0,
- },
- want: waitAction{
- Key: "/foo",
- Recursive: true,
- WaitIndex: 0,
- },
- },
-
- {
- key: "/foo",
- opts: &WatcherOptions{
- Recursive: false,
- AfterIndex: 19,
- },
- want: waitAction{
- Key: "/foo",
- Recursive: false,
- WaitIndex: 20,
- },
- },
- }
-
- for i, tt := range tests {
- testError := errors.New("fail!")
- kAPI := &httpKeysAPI{
- client: &staticHTTPClient{err: testError},
- }
-
- want := &httpWatcher{
- client: &staticHTTPClient{err: testError},
- nextWait: tt.want,
- }
-
- got := kAPI.Watcher(tt.key, tt.opts)
- if !reflect.DeepEqual(want, got) {
- t.Errorf("#%d: incorrect watcher: want=%#v got=%#v", i, want, got)
- }
- }
-}
-
-func TestHTTPKeysAPISetAction(t *testing.T) {
- tests := []struct {
- key string
- value string
- opts *SetOptions
- wantAction httpAction
- }{
- // nil SetOptions
- {
- key: "/foo",
- value: "bar",
- opts: nil,
- wantAction: &setAction{
- Key: "/foo",
- Value: "bar",
- PrevValue: "",
- PrevIndex: 0,
- PrevExist: PrevIgnore,
- TTL: 0,
- },
- },
- // empty SetOptions
- {
- key: "/foo",
- value: "bar",
- opts: &SetOptions{},
- wantAction: &setAction{
- Key: "/foo",
- Value: "bar",
- PrevValue: "",
- PrevIndex: 0,
- PrevExist: PrevIgnore,
- TTL: 0,
- },
- },
- // populated SetOptions
- {
- key: "/foo",
- value: "bar",
- opts: &SetOptions{
- PrevValue: "baz",
- PrevIndex: 13,
- PrevExist: PrevExist,
- TTL: time.Minute,
- Dir: true,
- },
- wantAction: &setAction{
- Key: "/foo",
- Value: "bar",
- PrevValue: "baz",
- PrevIndex: 13,
- PrevExist: PrevExist,
- TTL: time.Minute,
- Dir: true,
- },
- },
- }
-
- for i, tt := range tests {
- client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction}
- kAPI := httpKeysAPI{client: client}
- kAPI.Set(context.Background(), tt.key, tt.value, tt.opts)
- }
-}
-
-func TestHTTPKeysAPISetError(t *testing.T) {
- tests := []httpClient{
- // generic HTTP client failure
- &staticHTTPClient{
- err: errors.New("fail!"),
- },
-
- // unusable status code
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
-
- // etcd Error response
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusInternalServerError,
- },
- body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`),
- },
- }
-
- for i, tt := range tests {
- kAPI := httpKeysAPI{client: tt}
- resp, err := kAPI.Set(context.Background(), "/foo", "bar", nil)
- if err == nil {
- t.Errorf("#%d: received nil error", i)
- }
- if resp != nil {
- t.Errorf("#%d: received non-nil Response: %#v", i, resp)
- }
- }
-}
-
-func TestHTTPKeysAPISetResponse(t *testing.T) {
- client := &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusOK,
- Header: http.Header{"X-Etcd-Index": []string{"21"}},
- },
- body: []byte(`{"action":"set","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":21,"createdIndex":21},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`),
- }
-
- wantResponse := &Response{
- Action: "set",
- Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(21), ModifiedIndex: uint64(21)},
- PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)},
- Index: uint64(21),
- }
-
- kAPI := &httpKeysAPI{client: client, prefix: "/pants"}
- resp, err := kAPI.Set(context.Background(), "/foo/bar/baz", "snarf", nil)
- if err != nil {
- t.Errorf("non-nil error: %#v", err)
- }
- if !reflect.DeepEqual(wantResponse, resp) {
- t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp)
- }
-}
-
-func TestHTTPKeysAPIGetAction(t *testing.T) {
- tests := []struct {
- key string
- opts *GetOptions
- wantAction httpAction
- }{
- // nil GetOptions
- {
- key: "/foo",
- opts: nil,
- wantAction: &getAction{
- Key: "/foo",
- Sorted: false,
- Recursive: false,
- },
- },
- // empty GetOptions
- {
- key: "/foo",
- opts: &GetOptions{},
- wantAction: &getAction{
- Key: "/foo",
- Sorted: false,
- Recursive: false,
- },
- },
- // populated GetOptions
- {
- key: "/foo",
- opts: &GetOptions{
- Sort: true,
- Recursive: true,
- Quorum: true,
- },
- wantAction: &getAction{
- Key: "/foo",
- Sorted: true,
- Recursive: true,
- Quorum: true,
- },
- },
- }
-
- for i, tt := range tests {
- client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction}
- kAPI := httpKeysAPI{client: client}
- kAPI.Get(context.Background(), tt.key, tt.opts)
- }
-}
-
-func TestHTTPKeysAPIGetError(t *testing.T) {
- tests := []httpClient{
- // generic HTTP client failure
- &staticHTTPClient{
- err: errors.New("fail!"),
- },
-
- // unusable status code
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
-
- // etcd Error response
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusInternalServerError,
- },
- body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`),
- },
- }
-
- for i, tt := range tests {
- kAPI := httpKeysAPI{client: tt}
- resp, err := kAPI.Get(context.Background(), "/foo", nil)
- if err == nil {
- t.Errorf("#%d: received nil error", i)
- }
- if resp != nil {
- t.Errorf("#%d: received non-nil Response: %#v", i, resp)
- }
- }
-}
-
-func TestHTTPKeysAPIGetResponse(t *testing.T) {
- client := &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusOK,
- Header: http.Header{"X-Etcd-Index": []string{"42"}},
- },
- body: []byte(`{"action":"get","node":{"key":"/pants/foo/bar","modifiedIndex":25,"createdIndex":19,"nodes":[{"key":"/pants/foo/bar/baz","value":"snarf","createdIndex":21,"modifiedIndex":25}]}}`),
- }
-
- wantResponse := &Response{
- Action: "get",
- Node: &Node{
- Key: "/pants/foo/bar",
- Nodes: []*Node{
- {Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: 21, ModifiedIndex: 25},
- },
- CreatedIndex: uint64(19),
- ModifiedIndex: uint64(25),
- },
- Index: uint64(42),
- }
-
- kAPI := &httpKeysAPI{client: client, prefix: "/pants"}
- resp, err := kAPI.Get(context.Background(), "/foo/bar", &GetOptions{Recursive: true})
- if err != nil {
- t.Errorf("non-nil error: %#v", err)
- }
- if !reflect.DeepEqual(wantResponse, resp) {
- t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp)
- }
-}
-
-func TestHTTPKeysAPIDeleteAction(t *testing.T) {
- tests := []struct {
- key string
- opts *DeleteOptions
- wantAction httpAction
- }{
- // nil DeleteOptions
- {
- key: "/foo",
- opts: nil,
- wantAction: &deleteAction{
- Key: "/foo",
- PrevValue: "",
- PrevIndex: 0,
- Recursive: false,
- },
- },
- // empty DeleteOptions
- {
- key: "/foo",
- opts: &DeleteOptions{},
- wantAction: &deleteAction{
- Key: "/foo",
- PrevValue: "",
- PrevIndex: 0,
- Recursive: false,
- },
- },
- // populated DeleteOptions
- {
- key: "/foo",
- opts: &DeleteOptions{
- PrevValue: "baz",
- PrevIndex: 13,
- Recursive: true,
- },
- wantAction: &deleteAction{
- Key: "/foo",
- PrevValue: "baz",
- PrevIndex: 13,
- Recursive: true,
- },
- },
- }
-
- for i, tt := range tests {
- client := &actionAssertingHTTPClient{t: t, num: i, act: tt.wantAction}
- kAPI := httpKeysAPI{client: client}
- kAPI.Delete(context.Background(), tt.key, tt.opts)
- }
-}
-
-func TestHTTPKeysAPIDeleteError(t *testing.T) {
- tests := []httpClient{
- // generic HTTP client failure
- &staticHTTPClient{
- err: errors.New("fail!"),
- },
-
- // unusable status code
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusTeapot,
- },
- },
-
- // etcd Error response
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusInternalServerError,
- },
- body: []byte(`{"errorCode":300,"message":"Raft internal error","cause":"/foo","index":18}`),
- },
- }
-
- for i, tt := range tests {
- kAPI := httpKeysAPI{client: tt}
- resp, err := kAPI.Delete(context.Background(), "/foo", nil)
- if err == nil {
- t.Errorf("#%d: received nil error", i)
- }
- if resp != nil {
- t.Errorf("#%d: received non-nil Response: %#v", i, resp)
- }
- }
-}
-
-func TestHTTPKeysAPIDeleteResponse(t *testing.T) {
- client := &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusOK,
- Header: http.Header{"X-Etcd-Index": []string{"22"}},
- },
- body: []byte(`{"action":"delete","node":{"key":"/pants/foo/bar/baz","value":"snarf","modifiedIndex":22,"createdIndex":19},"prevNode":{"key":"/pants/foo/bar/baz","value":"snazz","modifiedIndex":20,"createdIndex":19}}`),
- }
-
- wantResponse := &Response{
- Action: "delete",
- Node: &Node{Key: "/pants/foo/bar/baz", Value: "snarf", CreatedIndex: uint64(19), ModifiedIndex: uint64(22)},
- PrevNode: &Node{Key: "/pants/foo/bar/baz", Value: "snazz", CreatedIndex: uint64(19), ModifiedIndex: uint64(20)},
- Index: uint64(22),
- }
-
- kAPI := &httpKeysAPI{client: client, prefix: "/pants"}
- resp, err := kAPI.Delete(context.Background(), "/foo/bar/baz", nil)
- if err != nil {
- t.Errorf("non-nil error: %#v", err)
- }
- if !reflect.DeepEqual(wantResponse, resp) {
- t.Errorf("incorrect Response: want=%#v got=%#v", wantResponse, resp)
- }
-}
-
-func TestHTTPKeysAPICreateAction(t *testing.T) {
- act := &setAction{
- Key: "/foo",
- Value: "bar",
- PrevExist: PrevNoExist,
- PrevIndex: 0,
- PrevValue: "",
- TTL: 0,
- }
-
- kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}}
- kAPI.Create(context.Background(), "/foo", "bar")
-}
-
-func TestHTTPKeysAPICreateInOrderAction(t *testing.T) {
- act := &createInOrderAction{
- Dir: "/foo",
- Value: "bar",
- TTL: 0,
- }
- kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}}
- kAPI.CreateInOrder(context.Background(), "/foo", "bar", nil)
-}
-
-func TestHTTPKeysAPIUpdateAction(t *testing.T) {
- act := &setAction{
- Key: "/foo",
- Value: "bar",
- PrevExist: PrevExist,
- PrevIndex: 0,
- PrevValue: "",
- TTL: 0,
- }
-
- kAPI := httpKeysAPI{client: &actionAssertingHTTPClient{t: t, act: act}}
- kAPI.Update(context.Background(), "/foo", "bar")
-}
-
-func TestNodeTTLDuration(t *testing.T) {
- tests := []struct {
- node *Node
- want time.Duration
- }{
- {
- node: &Node{TTL: 0},
- want: 0,
- },
- {
- node: &Node{TTL: 97},
- want: 97 * time.Second,
- },
- }
-
- for i, tt := range tests {
- got := tt.node.TTLDuration()
- if tt.want != got {
- t.Errorf("#%d: incorrect duration: want=%v got=%v", i, tt.want, got)
- }
- }
-}
diff --git a/client/v2/main_test.go b/client/v2/main_test.go
deleted file mode 100644
index 2a0195aadd6..00000000000
--- a/client/v2/main_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client_test
-
-import (
- "net/http"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func exampleEndpoints() []string { return nil }
-func exampleTransport() *http.Transport { return nil }
-
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
- mocking()
- // TODO: Call 'example' when mocking() provides realistic mocking of transport.
-
- // The real testing logic of examples gets executed
- // as part of ./tests/integration/client/example/...
-}
-
-func TestMain(m *testing.M) {
- testutil.MustTestMainWithLeakDetection(m)
-}
diff --git a/client/v2/members_test.go b/client/v2/members_test.go
deleted file mode 100644
index ecea78096c8..00000000000
--- a/client/v2/members_test.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "encoding/json"
- "errors"
- "net/http"
- "net/url"
- "reflect"
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/types"
-)
-
-func TestMembersAPIActionList(t *testing.T) {
- ep := url.URL{Scheme: "http", Host: "example.com"}
- act := &membersAPIActionList{}
-
- wantURL := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/v2/members",
- }
-
- got := *act.HTTPRequest(ep)
- err := assertRequest(got, "GET", wantURL, http.Header{}, nil)
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestMembersAPIActionAdd(t *testing.T) {
- ep := url.URL{Scheme: "http", Host: "example.com"}
- act := &membersAPIActionAdd{
- peerURLs: types.URLs([]url.URL{
- {Scheme: "https", Host: "127.0.0.1:8081"},
- {Scheme: "http", Host: "127.0.0.1:8080"},
- }),
- }
-
- wantURL := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/v2/members",
- }
- wantHeader := http.Header{
- "Content-Type": []string{"application/json"},
- }
- wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`)
-
- got := *act.HTTPRequest(ep)
- err := assertRequest(got, "POST", wantURL, wantHeader, wantBody)
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestMembersAPIActionUpdate(t *testing.T) {
- ep := url.URL{Scheme: "http", Host: "example.com"}
- act := &membersAPIActionUpdate{
- memberID: "0xabcd",
- peerURLs: types.URLs([]url.URL{
- {Scheme: "https", Host: "127.0.0.1:8081"},
- {Scheme: "http", Host: "127.0.0.1:8080"},
- }),
- }
-
- wantURL := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/v2/members/0xabcd",
- }
- wantHeader := http.Header{
- "Content-Type": []string{"application/json"},
- }
- wantBody := []byte(`{"peerURLs":["https://127.0.0.1:8081","http://127.0.0.1:8080"]}`)
-
- got := *act.HTTPRequest(ep)
- err := assertRequest(got, "PUT", wantURL, wantHeader, wantBody)
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestMembersAPIActionRemove(t *testing.T) {
- ep := url.URL{Scheme: "http", Host: "example.com"}
- act := &membersAPIActionRemove{memberID: "XXX"}
-
- wantURL := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/v2/members/XXX",
- }
-
- got := *act.HTTPRequest(ep)
- err := assertRequest(got, "DELETE", wantURL, http.Header{}, nil)
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestMembersAPIActionLeader(t *testing.T) {
- ep := url.URL{Scheme: "http", Host: "example.com"}
- act := &membersAPIActionLeader{}
-
- wantURL := &url.URL{
- Scheme: "http",
- Host: "example.com",
- Path: "/v2/members/leader",
- }
-
- got := *act.HTTPRequest(ep)
- err := assertRequest(got, "GET", wantURL, http.Header{}, nil)
- if err != nil {
- t.Error(err.Error())
- }
-}
-
-func TestAssertStatusCode(t *testing.T) {
- if err := assertStatusCode(404, 400); err == nil {
- t.Errorf("assertStatusCode failed to detect conflict in 400 vs 404")
- }
-
- if err := assertStatusCode(404, 400, 404); err != nil {
-		t.Errorf("assertStatusCode found conflict in 404 vs (400,404): %v", err)
- }
-}
-
-func TestV2MembersURL(t *testing.T) {
- got := v2MembersURL(url.URL{
- Scheme: "http",
- Host: "foo.example.com:4002",
- Path: "/pants",
- })
- want := &url.URL{
- Scheme: "http",
- Host: "foo.example.com:4002",
- Path: "/pants/v2/members",
- }
-
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("v2MembersURL got %#v, want %#v", got, want)
- }
-}
-
-func TestMemberUnmarshal(t *testing.T) {
- tests := []struct {
- body []byte
- wantMember Member
- wantError bool
- }{
- // no URLs, just check ID & Name
- {
- body: []byte(`{"id": "c", "name": "dungarees"}`),
- wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil},
- },
-
- // both client and peer URLs
- {
- body: []byte(`{"peerURLs": ["http://127.0.0.1:2379"], "clientURLs": ["http://127.0.0.1:2379"]}`),
- wantMember: Member{
- PeerURLs: []string{
- "http://127.0.0.1:2379",
- },
- ClientURLs: []string{
- "http://127.0.0.1:2379",
- },
- },
- },
-
- // multiple peer URLs
- {
- body: []byte(`{"peerURLs": ["http://127.0.0.1:2379", "https://example.com"]}`),
- wantMember: Member{
- PeerURLs: []string{
- "http://127.0.0.1:2379",
- "https://example.com",
- },
- ClientURLs: nil,
- },
- },
-
- // multiple client URLs
- {
- body: []byte(`{"clientURLs": ["http://127.0.0.1:2379", "https://example.com"]}`),
- wantMember: Member{
- PeerURLs: nil,
- ClientURLs: []string{
- "http://127.0.0.1:2379",
- "https://example.com",
- },
- },
- },
-
- // invalid JSON
- {
- body: []byte(`{"peerU`),
- wantError: true,
- },
- }
-
- for i, tt := range tests {
- got := Member{}
- err := json.Unmarshal(tt.body, &got)
- if tt.wantError != (err != nil) {
- t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err)
- continue
- }
-
- if !reflect.DeepEqual(tt.wantMember, got) {
- t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got)
- }
- }
-}
-
-func TestMemberCollectionUnmarshalFail(t *testing.T) {
- mc := &memberCollection{}
- if err := mc.UnmarshalJSON([]byte(`{`)); err == nil {
- t.Errorf("got nil error")
- }
-}
-
-func TestMemberCollectionUnmarshal(t *testing.T) {
- tests := []struct {
- body []byte
- want memberCollection
- }{
- {
- body: []byte(`{}`),
- want: memberCollection([]Member{}),
- },
- {
- body: []byte(`{"members":[]}`),
- want: memberCollection([]Member{}),
- },
- {
- body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
- want: memberCollection(
- []Member{
- {
- ID: "2745e2525fce8fe",
- Name: "node3",
- PeerURLs: []string{
- "http://127.0.0.1:7003",
- },
- ClientURLs: []string{
- "http://127.0.0.1:4003",
- },
- },
- {
- ID: "42134f434382925",
- Name: "node1",
- PeerURLs: []string{
- "http://127.0.0.1:2380",
- "http://127.0.0.1:7001",
- },
- ClientURLs: []string{
- "http://127.0.0.1:2379",
- "http://127.0.0.1:4001",
- },
- },
- {
- ID: "94088180e21eb87b",
- Name: "node2",
- PeerURLs: []string{
- "http://127.0.0.1:7002",
- },
- ClientURLs: []string{
- "http://127.0.0.1:4002",
- },
- },
- },
- ),
- },
- }
-
- for i, tt := range tests {
- var got memberCollection
- err := json.Unmarshal(tt.body, &got)
- if err != nil {
- t.Errorf("#%d: unexpected error: %v", i, err)
- continue
- }
-
- if !reflect.DeepEqual(tt.want, got) {
- t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got)
- }
- }
-}
-
-func TestMemberCreateRequestMarshal(t *testing.T) {
- req := memberCreateOrUpdateRequest{
- PeerURLs: types.URLs([]url.URL{
- {Scheme: "http", Host: "127.0.0.1:8081"},
- {Scheme: "https", Host: "127.0.0.1:8080"},
- }),
- }
- want := []byte(`{"peerURLs":["http://127.0.0.1:8081","https://127.0.0.1:8080"]}`)
-
- got, err := json.Marshal(&req)
- if err != nil {
- t.Fatalf("Marshal returned unexpected err=%v", err)
- }
-
- if !reflect.DeepEqual(want, got) {
- t.Fatalf("Failed to marshal memberCreateRequest: want=%s, got=%s", want, got)
- }
-}
-
-func TestHTTPMembersAPIAddSuccess(t *testing.T) {
- wantAction := &membersAPIActionAdd{
- peerURLs: types.URLs([]url.URL{
- {Scheme: "http", Host: "127.0.0.1:7002"},
- }),
- }
-
- mAPI := &httpMembersAPI{
- client: &actionAssertingHTTPClient{
- t: t,
- act: wantAction,
- resp: http.Response{
- StatusCode: http.StatusCreated,
- },
- body: []byte(`{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"]}`),
- },
- }
-
- wantResponseMember := &Member{
- ID: "94088180e21eb87b",
- PeerURLs: []string{"http://127.0.0.1:7002"},
- }
-
- m, err := mAPI.Add(context.Background(), "http://127.0.0.1:7002")
- if err != nil {
- t.Errorf("got non-nil err: %#v", err)
- }
- if !reflect.DeepEqual(wantResponseMember, m) {
- t.Errorf("incorrect Member: want=%#v got=%#v", wantResponseMember, m)
- }
-}
-
-func TestHTTPMembersAPIAddError(t *testing.T) {
- okPeer := "http://example.com:2379"
- tests := []struct {
- peerURL string
- client httpClient
-
- // if wantErr == nil, assert that the returned error is non-nil
- // if wantErr != nil, assert that the returned error matches
- wantErr error
- }{
- // malformed peer URL
- {
- peerURL: ":",
- },
-
- // generic httpClient failure
- {
- peerURL: okPeer,
- client: &staticHTTPClient{err: errors.New("fail!")},
- },
-
- // unrecognized HTTP status code
- {
- peerURL: okPeer,
- client: &staticHTTPClient{
- resp: http.Response{StatusCode: http.StatusTeapot},
- },
- },
-
- // unmarshal body into membersError on StatusConflict
- {
- peerURL: okPeer,
- client: &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusConflict,
- },
- body: []byte(`{"message":"fail!"}`),
- },
- wantErr: membersError{Message: "fail!"},
- },
-
- // fail to unmarshal body on StatusConflict
- {
- peerURL: okPeer,
- client: &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusConflict,
- },
- body: []byte(`{"`),
- },
- },
-
- // fail to unmarshal body on StatusCreated
- {
- peerURL: okPeer,
- client: &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusCreated,
- },
- body: []byte(`{"id":"XX`),
- },
- },
- }
-
- for i, tt := range tests {
- mAPI := &httpMembersAPI{client: tt.client}
- m, err := mAPI.Add(context.Background(), tt.peerURL)
- if err == nil {
- t.Errorf("#%d: got nil err", i)
- }
- if tt.wantErr != nil && !reflect.DeepEqual(tt.wantErr, err) {
- t.Errorf("#%d: incorrect error: want=%#v got=%#v", i, tt.wantErr, err)
- }
- if m != nil {
- t.Errorf("#%d: got non-nil Member", i)
- }
- }
-}
-
-func TestHTTPMembersAPIRemoveSuccess(t *testing.T) {
- wantAction := &membersAPIActionRemove{
- memberID: "94088180e21eb87b",
- }
-
- mAPI := &httpMembersAPI{
- client: &actionAssertingHTTPClient{
- t: t,
- act: wantAction,
- resp: http.Response{
- StatusCode: http.StatusNoContent,
- },
- },
- }
-
- if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err != nil {
- t.Errorf("got non-nil err: %#v", err)
- }
-}
-
-func TestHTTPMembersAPIRemoveFail(t *testing.T) {
- tests := []httpClient{
- // generic error
- &staticHTTPClient{
- err: errors.New("fail!"),
- },
-
- // unexpected HTTP status code
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusInternalServerError,
- },
- },
- }
-
- for i, tt := range tests {
- mAPI := &httpMembersAPI{client: tt}
- if err := mAPI.Remove(context.Background(), "94088180e21eb87b"); err == nil {
- t.Errorf("#%d: got nil err", i)
- }
- }
-}
-
-func TestHTTPMembersAPIListSuccess(t *testing.T) {
- wantAction := &membersAPIActionList{}
- mAPI := &httpMembersAPI{
- client: &actionAssertingHTTPClient{
- t: t,
- act: wantAction,
- resp: http.Response{
- StatusCode: http.StatusOK,
- },
- body: []byte(`{"members":[{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}]}`),
- },
- }
-
- wantResponseMembers := []Member{
- {
- ID: "94088180e21eb87b",
- Name: "node2",
- PeerURLs: []string{"http://127.0.0.1:7002"},
- ClientURLs: []string{"http://127.0.0.1:4002"},
- },
- }
-
- m, err := mAPI.List(context.Background())
- if err != nil {
- t.Errorf("got non-nil err: %#v", err)
- }
- if !reflect.DeepEqual(wantResponseMembers, m) {
- t.Errorf("incorrect Members: want=%#v got=%#v", wantResponseMembers, m)
- }
-}
-
-func TestHTTPMembersAPIListError(t *testing.T) {
- tests := []httpClient{
- // generic httpClient failure
- &staticHTTPClient{err: errors.New("fail!")},
-
- // unrecognized HTTP status code
- &staticHTTPClient{
- resp: http.Response{StatusCode: http.StatusTeapot},
- },
-
- // fail to unmarshal body on StatusOK
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusOK,
- },
- body: []byte(`[{"id":"XX`),
- },
- }
-
- for i, tt := range tests {
- mAPI := &httpMembersAPI{client: tt}
- ms, err := mAPI.List(context.Background())
- if err == nil {
- t.Errorf("#%d: got nil err", i)
- }
- if ms != nil {
- t.Errorf("#%d: got non-nil Member slice", i)
- }
- }
-}
-
-func TestHTTPMembersAPILeaderSuccess(t *testing.T) {
- wantAction := &membersAPIActionLeader{}
- mAPI := &httpMembersAPI{
- client: &actionAssertingHTTPClient{
- t: t,
- act: wantAction,
- resp: http.Response{
- StatusCode: http.StatusOK,
- },
- body: []byte(`{"id":"94088180e21eb87b","name":"node2","peerURLs":["http://127.0.0.1:7002"],"clientURLs":["http://127.0.0.1:4002"]}`),
- },
- }
-
- wantResponseMember := &Member{
- ID: "94088180e21eb87b",
- Name: "node2",
- PeerURLs: []string{"http://127.0.0.1:7002"},
- ClientURLs: []string{"http://127.0.0.1:4002"},
- }
-
- m, err := mAPI.Leader(context.Background())
- if err != nil {
- t.Errorf("err = %v, want %v", err, nil)
- }
- if !reflect.DeepEqual(wantResponseMember, m) {
-		t.Errorf("incorrect member: member = %v, want %v", m, wantResponseMember)
- }
-}
-
-func TestHTTPMembersAPILeaderError(t *testing.T) {
- tests := []httpClient{
- // generic httpClient failure
- &staticHTTPClient{err: errors.New("fail!")},
-
- // unrecognized HTTP status code
- &staticHTTPClient{
- resp: http.Response{StatusCode: http.StatusTeapot},
- },
-
- // fail to unmarshal body on StatusOK
- &staticHTTPClient{
- resp: http.Response{
- StatusCode: http.StatusOK,
- },
- body: []byte(`[{"id":"XX`),
- },
- }
-
- for i, tt := range tests {
- mAPI := &httpMembersAPI{client: tt}
- m, err := mAPI.Leader(context.Background())
- if err == nil {
- t.Errorf("#%d: err = nil, want not nil", i)
- }
- if m != nil {
-			t.Errorf("#%d: member = %v, want nil", i, m)
- }
- }
-}
diff --git a/client/v2/util.go b/client/v2/util.go
deleted file mode 100644
index 15a8babff4d..00000000000
--- a/client/v2/util.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "regexp"
-)
-
-var (
- roleNotFoundRegExp *regexp.Regexp
- userNotFoundRegExp *regexp.Regexp
-)
-
-func init() {
- roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
- userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
-}
-
-// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
-func IsKeyNotFound(err error) bool {
- if cErr, ok := err.(Error); ok {
- return cErr.Code == ErrorCodeKeyNotFound
- }
- return false
-}
-
-// IsRoleNotFound returns true if the error indicates that the role was not found in the v2 API.
-func IsRoleNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return roleNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
-
-// IsUserNotFound returns true if the error indicates that the user was not found in the v2 API.
-func IsUserNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return userNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
diff --git a/client/v3/LICENSE b/client/v3/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/client/v3/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/client/v3/README.md b/client/v3/README.md
deleted file mode 100644
index af0087ebcc0..00000000000
--- a/client/v3/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# etcd/client/v3
-
-[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)
-[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/client/v3)
-
-`etcd/clientv3` is the official Go etcd client for v3.
-
-## Install
-
-```bash
-go get go.etcd.io/etcd/client/v3
-```
-
-## Get started
-
-Create a client using `clientv3.New`:
-
-```go
-import (
-	"time"
-
-	clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-func main() {
- cli, err := clientv3.New(clientv3.Config{
- Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
- DialTimeout: 5 * time.Second,
- })
- if err != nil {
- // handle error!
- }
- defer cli.Close()
-}
-```
-
-etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls, and `clientv3` uses
-[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after use;
-otherwise the connection leaks goroutines. To specify a request timeout,
-pass a `context.WithTimeout` context to the API call:
-
-```go
-ctx, cancel := context.WithTimeout(context.Background(), timeout)
-resp, err := cli.Put(ctx, "sample_key", "sample_value")
-cancel()
-if err != nil {
- // handle error!
-}
-// use the response
-```
-
-For full compatibility, it is recommended to install released versions of clients using go modules.
-
-## Error Handling
-
-The etcd client returns two types of errors:
-
-1. context error: canceled or deadline exceeded.
-2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/api/v3rpc/rpctypes).
-
-Here is example code that handles client errors:
-
-```go
-resp, err := cli.Put(ctx, "", "")
-if err != nil {
- switch err {
- case context.Canceled:
- log.Fatalf("ctx is canceled by another routine: %v", err)
- case context.DeadlineExceeded:
-		log.Fatalf("the deadline attached to ctx was exceeded: %v", err)
- case rpctypes.ErrEmptyKey:
- log.Fatalf("client-side error: %v", err)
- default:
- log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
- }
-}
-```
-
-## Metrics
-
-The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/main/tests/integration/clientv3/examples/example_metrics_test.go).
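-
-As a minimal sketch (not the project's own example code), the client interceptors from go-grpc-prometheus can be wired in through `clientv3.Config.DialOptions`; the `newInstrumentedClient` helper below is hypothetical, and metrics registration/serving is left to the application:
-
-```go
-import (
-	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
-	"google.golang.org/grpc"
-
-	clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-// newInstrumentedClient is a hypothetical helper: it dials etcd with the
-// default go-grpc-prometheus client interceptors so RPC metrics are recorded.
-func newInstrumentedClient(endpoints []string) (*clientv3.Client, error) {
-	return clientv3.New(clientv3.Config{
-		Endpoints: endpoints,
-		DialOptions: []grpc.DialOption{
-			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
-			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
-		},
-	})
-}
-```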
-
-## Namespacing
-
-The [namespace](https://godoc.org/go.etcd.io/etcd/client/v3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
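-
-A minimal sketch of wrapping an existing client with the namespace package (the `"my-app/"` prefix and the `example` function are chosen only for illustration):
-
-```go
-import (
-	"context"
-
-	clientv3 "go.etcd.io/etcd/client/v3"
-	"go.etcd.io/etcd/client/v3/namespace"
-)
-
-func example(cli *clientv3.Client) error {
-	// Wrap the KV, Watcher and Lease interfaces; every request is rewritten
-	// to live under the "my-app/" prefix, and responses are un-prefixed.
-	cli.KV = namespace.NewKV(cli.KV, "my-app/")
-	cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-app/")
-	cli.Lease = namespace.NewLease(cli.Lease, "my-app/")
-
-	// This Put is actually stored under the key "my-app/abc".
-	_, err := cli.Put(context.Background(), "abc", "123")
-	return err
-}
-```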
-
-## Request size limit
-
-The client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If not set, the request send limit defaults to 2 MiB (including gRPC overhead bytes) and the receive limit defaults to `math.MaxInt32`.
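-
-For example, a sketch that sets both limits explicitly (the 4 MiB and 8 MiB values are arbitrary and only for illustration):
-
-```go
-cli, err := clientv3.New(clientv3.Config{
-	Endpoints:          []string{"localhost:2379"},
-	MaxCallSendMsgSize: 4 * 1024 * 1024, // raise the 2 MiB default send limit
-	MaxCallRecvMsgSize: 8 * 1024 * 1024, // cap the default math.MaxInt32 receive limit
-})
-if err != nil {
-	// handle error!
-}
-defer cli.Close()
-```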
-
-## Examples
-
-More code [examples](https://github.com/etcd-io/etcd/tree/main/tests/integration/clientv3/examples) can be found at [GoDoc](https://pkg.go.dev/go.etcd.io/etcd/client/v3).
diff --git a/client/v3/auth.go b/client/v3/auth.go
deleted file mode 100644
index ae85ec9a942..00000000000
--- a/client/v3/auth.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "fmt"
- "strings"
-
- "google.golang.org/grpc"
-
- "go.etcd.io/etcd/api/v3/authpb"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-)
-
-type (
- AuthEnableResponse pb.AuthEnableResponse
- AuthDisableResponse pb.AuthDisableResponse
- AuthStatusResponse pb.AuthStatusResponse
- AuthenticateResponse pb.AuthenticateResponse
- AuthUserAddResponse pb.AuthUserAddResponse
- AuthUserDeleteResponse pb.AuthUserDeleteResponse
- AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
- AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
- AuthUserGetResponse pb.AuthUserGetResponse
- AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
- AuthRoleAddResponse pb.AuthRoleAddResponse
- AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
- AuthRoleGetResponse pb.AuthRoleGetResponse
- AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
- AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
- AuthUserListResponse pb.AuthUserListResponse
- AuthRoleListResponse pb.AuthRoleListResponse
-
- PermissionType authpb.Permission_Type
- Permission authpb.Permission
-)
-
-const (
- PermRead = authpb.READ
- PermWrite = authpb.WRITE
- PermReadWrite = authpb.READWRITE
-)
-
-type UserAddOptions authpb.UserAddOptions
-
-type Auth interface {
-	// Authenticate processes a login request and, if successful, returns an auth token.
- Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error)
-
- // AuthEnable enables auth of an etcd cluster.
- AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
-
- // AuthDisable disables auth of an etcd cluster.
- AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
-
- // AuthStatus returns the status of auth of an etcd cluster.
- AuthStatus(ctx context.Context) (*AuthStatusResponse, error)
-
- // UserAdd adds a new user to an etcd cluster.
- UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
-
- // UserAddWithOptions adds a new user to an etcd cluster with some options.
- UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)
-
- // UserDelete deletes a user from an etcd cluster.
- UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
-
- // UserChangePassword changes a password of a user.
- UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
-
- // UserGrantRole grants a role to a user.
- UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
-
-	// UserGet gets detailed information about a user.
- UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
-
- // UserList gets a list of all users.
- UserList(ctx context.Context) (*AuthUserListResponse, error)
-
- // UserRevokeRole revokes a role of a user.
- UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
-
- // RoleAdd adds a new role to an etcd cluster.
- RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
-
- // RoleGrantPermission grants a permission to a role.
- RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
-
-	// RoleGet gets detailed information about a role.
- RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
-
- // RoleList gets a list of all roles.
- RoleList(ctx context.Context) (*AuthRoleListResponse, error)
-
- // RoleRevokePermission revokes a permission from a role.
- RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
-
- // RoleDelete deletes a role.
- RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
-}
-
-type authClient struct {
- remote pb.AuthClient
- callOpts []grpc.CallOption
-}
-
-func NewAuth(c *Client) Auth {
- api := &authClient{remote: RetryAuthClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth {
- api := &authClient{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
- resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthenticateResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
- resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
- return (*AuthEnableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
- resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
- return (*AuthDisableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) {
- resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...)
- return (*AuthStatusResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
- resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
- return (*AuthUserAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) {
- resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...)
- return (*AuthUserAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
- resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
- return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
- resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
- resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
- return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
- resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
- return (*AuthUserGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
- resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
- return (*AuthUserListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
- resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
- return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
- resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
- return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
- perm := &authpb.Permission{
- Key: []byte(key),
- RangeEnd: []byte(rangeEnd),
- PermType: authpb.Permission_Type(permType),
- }
- resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
- return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
- resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
- resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
- return (*AuthRoleListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
- resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...)
- return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
- resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func StrToPermissionType(s string) (PermissionType, error) {
- val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
- if ok {
- return PermissionType(val), nil
- }
- return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
-}
diff --git a/client/v3/client.go b/client/v3/client.go
deleted file mode 100644
index 0c91889fa33..00000000000
--- a/client/v3/client.go
+++ /dev/null
@@ -1,619 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- grpccredentials "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/insecure"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/status"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/client/v3/credentials"
- "go.etcd.io/etcd/client/v3/internal/endpoint"
- "go.etcd.io/etcd/client/v3/internal/resolver"
-)
-
-var (
- ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
- ErrOldCluster = errors.New("etcdclient: old cluster version")
-)
-
-// Client provides and manages an etcd v3 client session.
-type Client struct {
- Cluster
- KV
- Lease
- Watcher
- Auth
- Maintenance
-
- conn *grpc.ClientConn
-
- cfg Config
- creds grpccredentials.TransportCredentials
- resolver *resolver.EtcdManualResolver
-
- epMu *sync.RWMutex
- endpoints []string
-
- ctx context.Context
- cancel context.CancelFunc
-
- // Username is a user name for authentication.
- Username string
- // Password is a password for authentication.
- Password string
- authTokenBundle credentials.Bundle
-
- callOpts []grpc.CallOption
-
- lgMu *sync.RWMutex
- lg *zap.Logger
-}
-
-// New creates a new etcdv3 client from a given configuration.
-func New(cfg Config) (*Client, error) {
- if len(cfg.Endpoints) == 0 {
- return nil, ErrNoAvailableEndpoints
- }
-
- return newClient(&cfg)
-}
-
-// NewCtxClient creates a client with a context but no underlying grpc
-// connection. This is useful for embedded cases that override the
-// service interface implementations and do not need connection management.
-func NewCtxClient(ctx context.Context, opts ...Option) *Client {
- cctx, cancel := context.WithCancel(ctx)
- c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)}
- for _, opt := range opts {
- opt(c)
- }
- if c.lg == nil {
- c.lg = zap.NewNop()
- }
- return c
-}
-
-// Option is a function type that can be passed as an argument to NewCtxClient to configure the client.
-type Option func(*Client)
-
-// NewFromURL creates a new etcdv3 client from a URL.
-func NewFromURL(url string) (*Client, error) {
- return New(Config{Endpoints: []string{url}})
-}
-
-// NewFromURLs creates a new etcdv3 client from URLs.
-func NewFromURLs(urls []string) (*Client, error) {
- return New(Config{Endpoints: urls})
-}
-
-// WithZapLogger is a NewCtxClient option that overrides the logger
-func WithZapLogger(lg *zap.Logger) Option {
- return func(c *Client) {
- c.lg = lg
- }
-}
-
-// WithLogger overrides the logger.
-//
-// Deprecated: Please use WithZapLogger or Logger field in clientv3.Config
-//
-// Does not change grpcLogger, which can be explicitly configured
-// using the grpc_zap.ReplaceGrpcLoggerV2(..) method.
-func (c *Client) WithLogger(lg *zap.Logger) *Client {
- c.lgMu.Lock()
- c.lg = lg
- c.lgMu.Unlock()
- return c
-}
-
-// GetLogger gets the logger.
-// NOTE: This method is for internal use of etcd-client library and should not be used as general-purpose logger.
-func (c *Client) GetLogger() *zap.Logger {
- c.lgMu.RLock()
- l := c.lg
- c.lgMu.RUnlock()
- return l
-}
-
-// Close shuts down the client's etcd connections.
-func (c *Client) Close() error {
- c.cancel()
- if c.Watcher != nil {
- c.Watcher.Close()
- }
- if c.Lease != nil {
- c.Lease.Close()
- }
- if c.conn != nil {
- return toErr(c.ctx, c.conn.Close())
- }
- return c.ctx.Err()
-}
-
-// Ctx is a context for "out of band" messages (e.g., for sending
-// "clean up" message when another context is canceled). It is
-// canceled on client Close().
-func (c *Client) Ctx() context.Context { return c.ctx }
-
-// Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() []string {
- // copy the slice; protect original endpoints from being changed
- c.epMu.RLock()
- defer c.epMu.RUnlock()
- eps := make([]string, len(c.endpoints))
- copy(eps, c.endpoints)
- return eps
-}
-
-// SetEndpoints updates client's endpoints.
-func (c *Client) SetEndpoints(eps ...string) {
- c.epMu.Lock()
- defer c.epMu.Unlock()
- c.endpoints = eps
-
- c.resolver.SetEndpoints(eps)
-}
-
-// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
-func (c *Client) Sync(ctx context.Context) error {
- mresp, err := c.MemberList(ctx)
- if err != nil {
- return err
- }
- var eps []string
- for _, m := range mresp.Members {
- if len(m.Name) != 0 && !m.IsLearner {
- eps = append(eps, m.ClientURLs...)
- }
- }
- c.SetEndpoints(eps...)
- c.lg.Debug("set etcd endpoints by autoSync", zap.Strings("endpoints", eps))
- return nil
-}
-
-func (c *Client) autoSync() {
- if c.cfg.AutoSyncInterval == time.Duration(0) {
- return
- }
-
- for {
- select {
- case <-c.ctx.Done():
- return
- case <-time.After(c.cfg.AutoSyncInterval):
- ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
- err := c.Sync(ctx)
- cancel()
- if err != nil && err != c.ctx.Err() {
- c.lg.Info("Auto sync endpoints failed.", zap.Error(err))
- }
- }
- }
-}
-
-// dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
- if c.cfg.DialKeepAliveTime > 0 {
- params := keepalive.ClientParameters{
- Time: c.cfg.DialKeepAliveTime,
- Timeout: c.cfg.DialKeepAliveTimeout,
- PermitWithoutStream: c.cfg.PermitWithoutStream,
- }
- opts = append(opts, grpc.WithKeepaliveParams(params))
- }
- opts = append(opts, dopts...)
-
- if creds != nil {
- opts = append(opts, grpc.WithTransportCredentials(creds))
- } else {
- opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }
-
- // Interceptor retry and backoff.
- // TODO: Replace all of clientv3/retry.go with RetryPolicy:
- // https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130
- rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
- opts = append(opts,
- // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
- // Streams that are safe to retry are enabled individually.
- grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)),
- grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)),
- )
-
- return opts, nil
-}
-
-// Dial connects to a single endpoint using the client's config.
-func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
- creds := c.credentialsForEndpoint(ep)
-
- // Use an ad-hoc resolver to guarantee that only the explicitly
- // given endpoint is used.
- return c.dial(creds, grpc.WithResolvers(resolver.New(ep)))
-}
-
-func (c *Client) getToken(ctx context.Context) error {
- var err error // return the last error in case of failure
-
- if c.Username == "" || c.Password == "" {
- return nil
- }
-
- resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password)
- if err != nil {
- if err == rpctypes.ErrAuthNotEnabled {
- c.authTokenBundle.UpdateAuthToken("")
- return nil
- }
- return err
- }
- c.authTokenBundle.UpdateAuthToken(resp.Token)
- return nil
-}
-
-// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
-// of the provided endpoint determines the scheme used for all endpoints of the client connection.
-func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- creds := c.credentialsForEndpoint(c.Endpoints()[0])
- opts := append(dopts, grpc.WithResolvers(c.resolver))
- return c.dial(creds, opts...)
-}
-
-// dial configures and dials any grpc balancer target.
-func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- opts, err := c.dialSetupOpts(creds, dopts...)
- if err != nil {
- return nil, fmt.Errorf("failed to configure dialer: %v", err)
- }
- if c.authTokenBundle != nil {
- opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
- }
-
- opts = append(opts, c.cfg.DialOptions...)
-
- dctx := c.ctx
- if c.cfg.DialTimeout > 0 {
- var cancel context.CancelFunc
- dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
- defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
- }
- target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0]))
- conn, err := grpc.DialContext(dctx, target, opts...)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-func authority(endpoint string) string {
- spl := strings.SplitN(endpoint, "://", 2)
- if len(spl) < 2 {
- if strings.HasPrefix(endpoint, "unix:") {
- return endpoint[len("unix:"):]
- }
- if strings.HasPrefix(endpoint, "unixs:") {
- return endpoint[len("unixs:"):]
- }
- return endpoint
- }
- return spl[1]
-}
-
-func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials {
- r := endpoint.RequiresCredentials(ep)
- switch r {
- case endpoint.CREDS_DROP:
- return nil
- case endpoint.CREDS_OPTIONAL:
- return c.creds
- case endpoint.CREDS_REQUIRE:
- if c.creds != nil {
- return c.creds
- }
- return credentials.NewBundle(credentials.Config{}).TransportCredentials()
- default:
- panic(fmt.Errorf("unsupported CredsRequirement: %v", r))
- }
-}
-
-func newClient(cfg *Config) (*Client, error) {
- if cfg == nil {
- cfg = &Config{}
- }
- var creds grpccredentials.TransportCredentials
- if cfg.TLS != nil {
- creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
- }
-
- // use a temporary skeleton client to bootstrap first connection
- baseCtx := context.TODO()
- if cfg.Context != nil {
- baseCtx = cfg.Context
- }
-
- ctx, cancel := context.WithCancel(baseCtx)
- client := &Client{
- conn: nil,
- cfg: *cfg,
- creds: creds,
- ctx: ctx,
- cancel: cancel,
- epMu: new(sync.RWMutex),
- callOpts: defaultCallOpts,
- lgMu: new(sync.RWMutex),
- }
-
- var err error
- if cfg.Logger != nil {
- client.lg = cfg.Logger
- } else if cfg.LogConfig != nil {
- client.lg, err = cfg.LogConfig.Build()
- } else {
- client.lg, err = logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
- if client.lg != nil {
- client.lg = client.lg.Named("etcd-client")
- }
- }
- if err != nil {
- return nil, err
- }
-
- if cfg.Username != "" && cfg.Password != "" {
- client.Username = cfg.Username
- client.Password = cfg.Password
- client.authTokenBundle = credentials.NewBundle(credentials.Config{})
- }
- if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
- if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
- return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
- }
- callOpts := []grpc.CallOption{
- defaultWaitForReady,
- defaultMaxCallSendMsgSize,
- defaultMaxCallRecvMsgSize,
- }
- if cfg.MaxCallSendMsgSize > 0 {
- callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
- }
- if cfg.MaxCallRecvMsgSize > 0 {
- callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
- }
- client.callOpts = callOpts
- }
-
- client.resolver = resolver.New(cfg.Endpoints...)
-
- if len(cfg.Endpoints) < 1 {
- client.cancel()
- return nil, errors.New("at least one Endpoint is required in client config")
- }
- client.SetEndpoints(cfg.Endpoints...)
-
- // Use the provided endpoint target so that, for https:// endpoints without any
- // TLS config given, grpc assumes the certificate server name is the endpoint host.
- conn, err := client.dialWithBalancer()
- if err != nil {
- client.cancel()
- client.resolver.Close()
- // TODO: Error like `fmt.Errorf(dialing [%s] failed: %v, strings.Join(cfg.Endpoints, ";"), err)` would help with debugging a lot.
- return nil, err
- }
- client.conn = conn
-
- client.Cluster = NewCluster(client)
- client.KV = NewKV(client)
- client.Lease = NewLease(client)
- client.Watcher = NewWatcher(client)
- client.Auth = NewAuth(client)
- client.Maintenance = NewMaintenance(client)
-
- // get token with the established connection
- ctx, cancel = client.ctx, func() {}
- if client.cfg.DialTimeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout)
- }
- err = client.getToken(ctx)
- if err != nil {
- client.Close()
- cancel()
- //TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err)
- return nil, err
- }
- cancel()
-
- if cfg.RejectOldCluster {
- if err := client.checkVersion(); err != nil {
- client.Close()
- return nil, err
- }
- }
-
- go client.autoSync()
- return client, nil
-}
-
-// roundRobinQuorumBackoff retries against quorum between each backoff.
-// This is intended for use with a round robin load balancer.
-func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
- return func(attempt uint) time.Duration {
- // after each round robin across quorum, backoff for our wait between duration
- n := uint(len(c.Endpoints()))
- quorum := (n/2 + 1)
- if attempt%quorum == 0 {
- c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
- return jitterUp(waitBetween, jitterFraction)
- }
- c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
- return 0
- }
-}
-
-func (c *Client) checkVersion() (err error) {
- var wg sync.WaitGroup
-
- eps := c.Endpoints()
- errc := make(chan error, len(eps))
- ctx, cancel := context.WithCancel(c.ctx)
- if c.cfg.DialTimeout > 0 {
- cancel()
- ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
- }
-
- wg.Add(len(eps))
- for _, ep := range eps {
- // if cluster is current, any endpoint gives a recent version
- go func(e string) {
- defer wg.Done()
- resp, rerr := c.Status(ctx, e)
- if rerr != nil {
- errc <- rerr
- return
- }
- vs := strings.Split(resp.Version, ".")
- maj, min := 0, 0
- if len(vs) >= 2 {
- var serr error
- if maj, serr = strconv.Atoi(vs[0]); serr != nil {
- errc <- serr
- return
- }
- if min, serr = strconv.Atoi(vs[1]); serr != nil {
- errc <- serr
- return
- }
- }
- if maj < 3 || (maj == 3 && min < 4) {
- rerr = ErrOldCluster
- }
- errc <- rerr
- }(ep)
- }
- // wait for success
- for range eps {
- if err = <-errc; err != nil {
- break
- }
- }
- cancel()
- wg.Wait()
- return err
-}
-
-// ActiveConnection returns the current in-use connection
-func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
-
-// isHaltErr returns true if the given error and context indicate no forward
-// progress can be made, even after reconnecting.
-func isHaltErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return true
- }
- if err == nil {
- return false
- }
- ev, _ := status.FromError(err)
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- // Treat Internal codes as if something failed, leaving the
- // system in an inconsistent state, but retrying could make progress.
- // (e.g., failed in middle of send, corrupted frame)
- // TODO: are permanent Internal errors possible from grpc?
- return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
-}
-
-// isUnavailableErr returns true if the given error is an unavailable error
-func isUnavailableErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return false
- }
- if err == nil {
- return false
- }
- ev, ok := status.FromError(err)
- if ok {
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- return ev.Code() == codes.Unavailable
- }
- return false
-}
-
-func toErr(ctx context.Context, err error) error {
- if err == nil {
- return nil
- }
- err = rpctypes.Error(err)
- if _, ok := err.(rpctypes.EtcdError); ok {
- return err
- }
- if ev, ok := status.FromError(err); ok {
- code := ev.Code()
- switch code {
- case codes.DeadlineExceeded:
- fallthrough
- case codes.Canceled:
- if ctx.Err() != nil {
- err = ctx.Err()
- }
- }
- }
- return err
-}
-
-func canceledByCaller(stopCtx context.Context, err error) bool {
- if stopCtx.Err() == nil || err == nil {
- return false
- }
-
- return err == context.Canceled || err == context.DeadlineExceeded
-}
-
-// IsConnCanceled returns true, if error is from a closed gRPC connection.
-// ref. https://github.com/grpc/grpc-go/pull/1854
-func IsConnCanceled(err error) bool {
- if err == nil {
- return false
- }
-
- // >= gRPC v1.23.x
- s, ok := status.FromError(err)
- if ok {
- // connection is canceled or server has already closed the connection
- return s.Code() == codes.Canceled || s.Message() == "transport is closing"
- }
-
- // >= gRPC v1.10.x
- if err == context.Canceled {
- return true
- }
-
- // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
- return strings.Contains(err.Error(), "grpc: the client connection is closing")
-}
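The file above defined the public client lifecycle (New/NewFromURL construction, Close, Sync, and endpoint management). A minimal usage sketch, assuming an etcd instance reachable at 127.0.0.1:2379 (the endpoint and timeouts are illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// New fails fast with ErrNoAvailableEndpoints when no endpoints are given.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed local etcd
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Close cancels the client context and shuts down watchers, leases,
	// and the underlying gRPC connection.
	defer cli.Close()

	// Sync replaces the endpoint list with client URLs from the current
	// membership, skipping learners and not-yet-started members.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cli.Sync(ctx); err != nil {
		log.Printf("endpoint sync failed: %v", err)
	}
}
```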
diff --git a/client/v3/client_test.go b/client/v3/client_test.go
deleted file mode 100644
index 0f52ad5d375..00000000000
--- a/client/v3/client_test.go
+++ /dev/null
@@ -1,451 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net"
- "sync"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zaptest"
-
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-
- "google.golang.org/grpc"
-)
-
-func NewClient(t *testing.T, cfg Config) (*Client, error) {
- if cfg.Logger == nil {
- cfg.Logger = zaptest.NewLogger(t).Named("client")
- }
- return New(cfg)
-}
-
-func TestDialCancel(t *testing.T) {
- testutil.RegisterLeakDetection(t)
-
- // accept first connection so client is created with dial timeout
- ln, err := net.Listen("unix", "dialcancel:12345")
- if err != nil {
- t.Fatal(err)
- }
- defer ln.Close()
-
- ep := "unix://dialcancel:12345"
- cfg := Config{
- Endpoints: []string{ep},
- DialTimeout: 30 * time.Second}
- c, err := NewClient(t, cfg)
- if err != nil {
- t.Fatal(err)
- }
-
- // connect to ipv4 black hole so dial blocks
- c.SetEndpoints("http://254.0.0.1:12345")
-
- // issue Get to force redial attempts
- getc := make(chan struct{})
- go func() {
- defer close(getc)
- // Get may hang forever on grpc's Stream.Header() if its
- // context is never canceled.
- c.Get(c.Ctx(), "abc")
- }()
-
- // wait a little bit so client close is after dial starts
- time.Sleep(100 * time.Millisecond)
-
- donec := make(chan struct{})
- go func() {
- defer close(donec)
- c.Close()
- }()
-
- select {
- case <-time.After(5 * time.Second):
- t.Fatalf("failed to close")
- case <-donec:
- }
- select {
- case <-time.After(5 * time.Second):
- t.Fatalf("get failed to exit")
- case <-getc:
- }
-}
-
-func TestDialTimeout(t *testing.T) {
- testutil.RegisterLeakDetection(t)
-
- wantError := context.DeadlineExceeded
-
- // grpc.WithBlock to block until connection up or timeout
- testCfgs := []Config{
- {
- Endpoints: []string{"http://254.0.0.1:12345"},
- DialTimeout: 2 * time.Second,
- DialOptions: []grpc.DialOption{grpc.WithBlock()},
- },
- {
- Endpoints: []string{"http://254.0.0.1:12345"},
- DialTimeout: time.Second,
- DialOptions: []grpc.DialOption{grpc.WithBlock()},
- Username: "abc",
- Password: "def",
- },
- }
-
- for i, cfg := range testCfgs {
- donec := make(chan error, 1)
- go func(cfg Config, i int) {
- // without timeout, dial continues forever on ipv4 black hole
- c, err := NewClient(t, cfg)
- if c != nil || err == nil {
- t.Errorf("#%d: new client should fail", i)
- }
- donec <- err
- }(cfg, i)
-
- time.Sleep(10 * time.Millisecond)
-
- select {
- case err := <-donec:
- t.Errorf("#%d: dial didn't wait (%v)", i, err)
- default:
- }
-
- select {
- case <-time.After(5 * time.Second):
- t.Errorf("#%d: failed to timeout dial on time", i)
- case err := <-donec:
- if err.Error() != wantError.Error() {
- t.Errorf("#%d: unexpected error '%v', want '%v'", i, err, wantError)
- }
- }
- }
-}
-
-func TestDialNoTimeout(t *testing.T) {
- cfg := Config{Endpoints: []string{"127.0.0.1:12345"}}
- c, err := NewClient(t, cfg)
- if c == nil || err != nil {
- t.Fatalf("new client with DialNoWait should succeed, got %v", err)
- }
- c.Close()
-}
-
-func TestIsHaltErr(t *testing.T) {
- assert.Equal(t,
- isHaltErr(context.TODO(), errors.New("etcdserver: some etcdserver error")),
- true,
- "error created by errors.New should be unavailable error",
- )
- assert.Equal(t,
- isHaltErr(context.TODO(), rpctypes.ErrGRPCStopped),
- false,
- fmt.Sprintf(`error "%v" should not be halt error`, rpctypes.ErrGRPCStopped),
- )
- assert.Equal(t,
- isHaltErr(context.TODO(), rpctypes.ErrGRPCNoLeader),
- false,
- fmt.Sprintf(`error "%v" should not be halt error`, rpctypes.ErrGRPCNoLeader),
- )
- ctx, cancel := context.WithCancel(context.TODO())
- assert.Equal(t,
- isHaltErr(ctx, nil),
- false,
- "no error and active context should be halt error",
- )
- cancel()
- assert.Equal(t,
- isHaltErr(ctx, nil),
- true,
- "cancel on context should be halte error",
- )
-}
-
-func TestIsUnavailableErr(t *testing.T) {
- assert.Equal(t,
- isUnavailableErr(context.TODO(), errors.New("etcdserver: some etcdserver error")),
- false,
- "error created by errors.New should not be unavailable error",
- )
- assert.Equal(t,
- isUnavailableErr(context.TODO(), rpctypes.ErrGRPCStopped),
- true,
- fmt.Sprintf(`error "%v" should be unavailable error`, rpctypes.ErrGRPCStopped),
- )
- assert.Equal(t,
- isUnavailableErr(context.TODO(), rpctypes.ErrGRPCNotCapable),
- false,
- fmt.Sprintf("error %v should not be unavailable error", rpctypes.ErrGRPCNotCapable),
- )
- ctx, cancel := context.WithCancel(context.TODO())
- assert.Equal(t,
- isUnavailableErr(ctx, nil),
- false,
- "no error and active context should not be unavailable error",
- )
- cancel()
- assert.Equal(t,
- isUnavailableErr(ctx, nil),
- false,
- "cancel on context should not be unavailable error",
- )
-}
-
-func TestCloseCtxClient(t *testing.T) {
- ctx := context.Background()
- c := NewCtxClient(ctx)
- err := c.Close()
- // Close returns c.ctx.Err(); a nil error would mean the Done channel is still open
- if err == nil {
- t.Errorf("failed to Close the client. %v", err)
- }
-}
-
-func TestWithLogger(t *testing.T) {
- ctx := context.Background()
- c := NewCtxClient(ctx)
- if c.lg == nil {
- t.Errorf("unexpected nil in *zap.Logger")
- }
-
- c.WithLogger(nil)
- if c.lg != nil {
- t.Errorf("WithLogger should modify *zap.Logger")
- }
-}
-
-func TestZapWithLogger(t *testing.T) {
- ctx := context.Background()
- lg := zap.NewNop()
- c := NewCtxClient(ctx, WithZapLogger(lg))
-
- if c.lg != lg {
- t.Errorf("WithZapLogger should modify *zap.Logger")
- }
-}
-
-func TestAuthTokenBundleNoOverwrite(t *testing.T) {
- // This call in particular changes the working directory to the test's tmp
- // dir, so that `etcd-auth-test:0` can be created in the local directory
- // without exceeding the longest allowed path on macOS.
- testutil.BeforeTest(t)
-
- // Create a mock AuthServer to handle Authenticate RPCs.
- lis, err := net.Listen("unix", "etcd-auth-test:0")
- if err != nil {
- t.Fatal(err)
- }
- defer lis.Close()
- addr := "unix://" + lis.Addr().String()
- srv := grpc.NewServer()
- etcdserverpb.RegisterAuthServer(srv, mockAuthServer{})
- go srv.Serve(lis)
- defer srv.Stop()
-
- // Create a client, which should call Authenticate on the mock server to
- // exchange username/password for an auth token.
- c, err := NewClient(t, Config{
- DialTimeout: 5 * time.Second,
- Endpoints: []string{addr},
- Username: "foo",
- Password: "bar",
- })
- if err != nil {
- t.Fatal(err)
- }
- defer c.Close()
- oldTokenBundle := c.authTokenBundle
-
- // Call the public Dial again, which should preserve the original
- // authTokenBundle.
- gc, err := c.Dial(addr)
- if err != nil {
- t.Fatal(err)
- }
- defer gc.Close()
- newTokenBundle := c.authTokenBundle
-
- if oldTokenBundle != newTokenBundle {
- t.Error("Client.authTokenBundle has been overwritten during Client.Dial")
- }
-}
-
-func TestSyncFiltersMembers(t *testing.T) {
- c, _ := NewClient(t, Config{Endpoints: []string{"http://254.0.0.1:12345"}})
- defer c.Close()
- c.Cluster = &mockCluster{
- []*etcdserverpb.Member{
- {ID: 0, Name: "", ClientURLs: []string{"http://254.0.0.1:12345"}, IsLearner: false},
- {ID: 1, Name: "isStarted", ClientURLs: []string{"http://254.0.0.2:12345"}, IsLearner: true},
- {ID: 2, Name: "isStartedAndNotLearner", ClientURLs: []string{"http://254.0.0.3:12345"}, IsLearner: false},
- },
- }
- c.Sync(context.Background())
-
- endpoints := c.Endpoints()
- if len(endpoints) != 1 || endpoints[0] != "http://254.0.0.3:12345" {
- t.Error("Client.Sync uses learner and/or non-started member client URLs")
- }
-}
-
-func TestClientRejectOldCluster(t *testing.T) {
- testutil.BeforeTest(t)
- var tests = []struct {
- name string
- endpoints []string
- versions []string
- expectedError error
- }{
- {
- name: "all new versions with the same value",
- endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
- versions: []string{"3.5.4", "3.5.4", "3.5.4"},
- expectedError: nil,
- },
- {
- name: "all new versions with different values",
- endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
- versions: []string{"3.5.4", "3.5.4", "3.4.0"},
- expectedError: nil,
- },
- {
- name: "all old versions with different values",
- endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
- versions: []string{"3.3.0", "3.3.0", "3.4.0"},
- expectedError: ErrOldCluster,
- },
- {
- name: "all old versions with the same value",
- endpoints: []string{"192.168.3.41:22379", "192.168.3.41:22479", "192.168.3.41:22579"},
- versions: []string{"3.3.0", "3.3.0", "3.3.0"},
- expectedError: ErrOldCluster,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if len(tt.endpoints) != len(tt.versions) || len(tt.endpoints) == 0 {
- t.Errorf("Unexpected endpoints and versions length, len(endpoints):%d, len(versions):%d", len(tt.endpoints), len(tt.versions))
- return
- }
- endpointToVersion := make(map[string]string)
- for j := range tt.endpoints {
- endpointToVersion[tt.endpoints[j]] = tt.versions[j]
- }
- c := &Client{
- ctx: context.Background(),
- endpoints: tt.endpoints,
- epMu: new(sync.RWMutex),
- Maintenance: &mockMaintenance{
- Version: endpointToVersion,
- },
- }
-
- if err := c.checkVersion(); err != tt.expectedError {
- t.Errorf("heckVersion err:%v", err)
- }
- })
-
- }
-
-}
-
-type mockMaintenance struct {
- Version map[string]string
-}
-
-func (mm mockMaintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
- return &StatusResponse{Version: mm.Version[endpoint]}, nil
-}
-
-func (mm mockMaintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
- return nil, nil
-}
-
-func (mm mockMaintenance) AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) {
- return nil, nil
-}
-
-func (mm mockMaintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
- return nil, nil
-}
-
-func (mm mockMaintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
- return nil, nil
-}
-
-func (mm mockMaintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) {
- return nil, nil
-}
-
-func (mm mockMaintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
- return nil, nil
-}
-
-func (mm mockMaintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
- return nil, nil
-}
-
-func (mm mockMaintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) {
- return nil, nil
-}
-
-type mockAuthServer struct {
- *etcdserverpb.UnimplementedAuthServer
-}
-
-func (mockAuthServer) Authenticate(context.Context, *etcdserverpb.AuthenticateRequest) (*etcdserverpb.AuthenticateResponse, error) {
- return &etcdserverpb.AuthenticateResponse{Token: "mock-token"}, nil
-}
-
-type mockCluster struct {
- members []*etcdserverpb.Member
-}
-
-func (mc *mockCluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
- return &MemberListResponse{Members: mc.members}, nil
-}
-
-func (mc *mockCluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
- return nil, nil
-}
-
-func (mc *mockCluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
- return nil, nil
-}
-
-func (mc *mockCluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
- return nil, nil
-}
-
-func (mc *mockCluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
- return nil, nil
-}
-
-func (mc *mockCluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
- return nil, nil
-}
diff --git a/client/v3/clientv3util/example_key_test.go b/client/v3/clientv3util/example_key_test.go
deleted file mode 100644
index fbbbe417260..00000000000
--- a/client/v3/clientv3util/example_key_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3util_test
-
-import (
- "context"
- "log"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/clientv3util"
-)
-
-func ExampleKeyMissing() {
- cli, err := clientv3.New(clientv3.Config{
- Endpoints: []string{"127.0.0.1:2379"},
- })
- if err != nil {
- log.Fatal(err)
- }
- defer cli.Close()
- kvc := clientv3.NewKV(cli)
-
- // Perform a put only if the key is missing. It is useful to do the check
- // atomically to avoid overwriting the existing key, which would generate
- // potentially unwanted events, unless of course an overwrite is intended
- // no matter what.
- _, err = kvc.Txn(context.Background()).
- If(clientv3util.KeyMissing("purpleidea")).
- Then(clientv3.OpPut("purpleidea", "hello world")).
- Commit()
- if err != nil {
- log.Fatal(err)
- }
-}
-
-func ExampleKeyExists() {
- cli, err := clientv3.New(clientv3.Config{
- Endpoints: []string{"127.0.0.1:2379"},
- })
- if err != nil {
- log.Fatal(err)
- }
- defer cli.Close()
- kvc := clientv3.NewKV(cli)
-
- // perform a delete only if key already exists
- _, err = kvc.Txn(context.Background()).
- If(clientv3util.KeyExists("purpleidea")).
- Then(clientv3.OpDelete("purpleidea")).
- Commit()
- if err != nil {
- log.Fatal(err)
- }
-}
diff --git a/client/v3/clientv3util/util.go b/client/v3/clientv3util/util.go
deleted file mode 100644
index 144777bd2c7..00000000000
--- a/client/v3/clientv3util/util.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package clientv3util contains utility functions derived from clientv3.
-package clientv3util
-
-import (
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-// KeyExists returns a comparison operation that evaluates to true iff the given
-// key exists. It does this by checking if the key `Version` is greater than 0.
-// It is a useful guard in transaction delete operations.
-func KeyExists(key string) clientv3.Cmp {
- return clientv3.Compare(clientv3.Version(key), ">", 0)
-}
-
-// KeyMissing returns a comparison operation that evaluates to true iff the
-// given key does not exist.
-func KeyMissing(key string) clientv3.Cmp {
- return clientv3.Compare(clientv3.Version(key), "=", 0)
-}
diff --git a/client/v3/cluster.go b/client/v3/cluster.go
deleted file mode 100644
index 92d7cdb56b0..00000000000
--- a/client/v3/cluster.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
-
- "google.golang.org/grpc"
-)
-
-type (
- Member pb.Member
- MemberListResponse pb.MemberListResponse
- MemberAddResponse pb.MemberAddResponse
- MemberRemoveResponse pb.MemberRemoveResponse
- MemberUpdateResponse pb.MemberUpdateResponse
- MemberPromoteResponse pb.MemberPromoteResponse
-)
-
-type Cluster interface {
- // MemberList lists the current cluster membership.
- MemberList(ctx context.Context) (*MemberListResponse, error)
-
- // MemberAdd adds a new member into the cluster.
- MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
-
- // MemberAddAsLearner adds a new learner member into the cluster.
- MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
-
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
-
- // MemberUpdate updates the peer addresses of the member.
- MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
-
- // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
- MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error)
-}
-
-type cluster struct {
- remote pb.ClusterClient
- callOpts []grpc.CallOption
-}
-
-func NewCluster(c *Client) Cluster {
- api := &cluster{remote: RetryClusterClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
- api := &cluster{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
- return c.memberAdd(ctx, peerAddrs, false)
-}
-
-func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
- return c.memberAdd(ctx, peerAddrs, true)
-}
-
-func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- r := &pb.MemberAddRequest{
- PeerURLs: peerAddrs,
- IsLearner: isLearner,
- }
- resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberAddResponse)(resp), nil
-}
-
-func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
- r := &pb.MemberRemoveRequest{ID: id}
- resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberRemoveResponse)(resp), nil
-}
-
-func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- // it is safe to retry on update.
- r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
- resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
- if err == nil {
- return (*MemberUpdateResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
- // it is safe to retry on list.
- resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...)
- if err == nil {
- return (*MemberListResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
- r := &pb.MemberPromoteRequest{ID: id}
- resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberPromoteResponse)(resp), nil
-}
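A short sketch of how the Cluster interface above was typically driven, assuming `cli` is an existing `*clientv3.Client`; the learner ID is illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// listAndPromote prints the current membership and promotes a learner to a
// voting member. The learner ID is purely illustrative.
func listAndPromote(cli *clientv3.Client, learnerID uint64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// MemberList is safe to retry and is served linearizably.
	resp, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		log.Printf("member %x name=%q learner=%v", m.ID, m.Name, m.IsLearner)
	}

	// MemberPromote turns a raft learner into a voting member once it has
	// caught up with the leader's log.
	_, err = cli.MemberPromote(ctx, learnerID)
	return err
}
```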
diff --git a/client/v3/compact_op_test.go b/client/v3/compact_op_test.go
deleted file mode 100644
index f483322adf5..00000000000
--- a/client/v3/compact_op_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "reflect"
- "testing"
-
- "go.etcd.io/etcd/api/v3/etcdserverpb"
-)
-
-func TestCompactOp(t *testing.T) {
- req1 := OpCompact(100, WithCompactPhysical()).toRequest()
- req2 := &etcdserverpb.CompactionRequest{Revision: 100, Physical: true}
- if !reflect.DeepEqual(req1, req2) {
- t.Fatalf("expected %+v, got %+v", req2, req1)
- }
-}
diff --git a/client/v3/compare.go b/client/v3/compare.go
deleted file mode 100644
index e2967cf38ed..00000000000
--- a/client/v3/compare.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-)
-
-type CompareTarget int
-type CompareResult int
-
-const (
- CompareVersion CompareTarget = iota
- CompareCreated
- CompareModified
- CompareValue
-)
-
-type Cmp pb.Compare
-
-func Compare(cmp Cmp, result string, v interface{}) Cmp {
- var r pb.Compare_CompareResult
-
- switch result {
- case "=":
- r = pb.Compare_EQUAL
- case "!=":
- r = pb.Compare_NOT_EQUAL
- case ">":
- r = pb.Compare_GREATER
- case "<":
- r = pb.Compare_LESS
- default:
- panic("Unknown result op")
- }
-
- cmp.Result = r
- switch cmp.Target {
- case pb.Compare_VALUE:
- val, ok := v.(string)
- if !ok {
- panic("bad compare value")
- }
- cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
- case pb.Compare_VERSION:
- cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
- case pb.Compare_CREATE:
- cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
- case pb.Compare_MOD:
- cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
- case pb.Compare_LEASE:
- cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
- default:
- panic("Unknown compare type")
- }
- return cmp
-}
-
-func Value(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
-}
-
-func Version(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
-}
-
-func CreateRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
-}
-
-func ModRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
-}
-
-// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
-// LeaseID is 0, otherwise known as `NoLease`.
-func LeaseValue(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
-}
-
-// KeyBytes returns the byte slice holding the comparison key.
-func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
-
-// WithKeyBytes sets the byte slice for the comparison key.
-func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
-
-// ValueBytes returns the byte slice holding the comparison value, if any.
-func (cmp *Cmp) ValueBytes() []byte {
- if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
- return tu.Value
- }
- return nil
-}
-
-// WithValueBytes sets the byte slice for the comparison's value.
-func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
-
-// WithRange sets the comparison to scan the range [key, end).
-func (cmp Cmp) WithRange(end string) Cmp {
- cmp.RangeEnd = []byte(end)
- return cmp
-}
-
-// WithPrefix sets the comparison to scan all keys prefixed by the key.
-func (cmp Cmp) WithPrefix() Cmp {
- cmp.RangeEnd = getPrefix(cmp.Key)
- return cmp
-}
-
-// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
-func mustInt64(val interface{}) int64 {
- if v, ok := val.(int64); ok {
- return v
- }
- if v, ok := val.(int); ok {
- return int64(v)
- }
- panic("bad value")
-}
-
-// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
-// int64 otherwise.
-func mustInt64orLeaseID(val interface{}) int64 {
- if v, ok := val.(LeaseID); ok {
- return int64(v)
- }
- return mustInt64(val)
-}
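The Cmp helpers above are the building blocks for transactional guards. A minimal sketch of a compare-and-set put using `Version(key) = 0` (the same check clientv3util.KeyMissing builds on), assuming `cli` is an existing `*clientv3.Client`:

```go
package main

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// casPut writes value to key only if the key has never been created, i.e.
// its Version is still 0.
func casPut(ctx context.Context, cli *clientv3.Client, key, value string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Version(key), "=", 0)).
		Then(clientv3.OpPut(key, value)).
		Commit()
	if err != nil {
		return false, err
	}
	// Succeeded reports whether the If guard held and the Then branch ran.
	return resp.Succeeded, nil
}
```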
diff --git a/client/v3/concurrency/election.go b/client/v3/concurrency/election.go
deleted file mode 100644
index 31e93d24280..00000000000
--- a/client/v3/concurrency/election.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "errors"
- "fmt"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
-)
-
-var (
- ErrElectionNotLeader = errors.New("election: not leader")
- ErrElectionNoLeader = errors.New("election: no leader")
-)
-
-type Election struct {
- session *Session
-
- keyPrefix string
-
- leaderKey string
- leaderRev int64
- leaderSession *Session
- hdr *pb.ResponseHeader
-}
-
-// NewElection returns a new election on a given key prefix.
-func NewElection(s *Session, pfx string) *Election {
- return &Election{session: s, keyPrefix: pfx + "/"}
-}
-
-// ResumeElection initializes an election with a known leader.
-func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
- return &Election{
- keyPrefix: pfx,
- session: s,
- leaderKey: leaderKey,
- leaderRev: leaderRev,
- leaderSession: s,
- }
-}
-
-// Campaign puts a value as eligible for the election on the prefix
-// key.
-// Multiple sessions can participate in the election for the
-// same prefix, but only one can be the leader at a time.
-//
-// If the context is 'context.TODO()/context.Background()', Campaign will
-// keep blocking while it waits for other keys to be deleted, unless the
-// server returns a non-recoverable error (e.g. ErrCompacted).
-// Otherwise, Campaign blocks until it becomes the leader or until the
-// context is cancelled or times out.
-func (e *Election) Campaign(ctx context.Context, val string) error {
- s := e.session
- client := e.session.Client()
-
- k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
- txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
- txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
- txn = txn.Else(v3.OpGet(k))
- resp, err := txn.Commit()
- if err != nil {
- return err
- }
- e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
- if !resp.Succeeded {
- kv := resp.Responses[0].GetResponseRange().Kvs[0]
- e.leaderRev = kv.CreateRevision
- if string(kv.Value) != val {
- if err = e.Proclaim(ctx, val); err != nil {
- e.Resign(ctx)
- return err
- }
- }
- }
-
- _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
- if err != nil {
- // clean up in case of context cancel
- select {
- case <-ctx.Done():
- e.Resign(client.Ctx())
- default:
- e.leaderSession = nil
- }
- return err
- }
- e.hdr = resp.Header
-
- return nil
-}
-
-// Proclaim lets the leader announce a new value without another election.
-func (e *Election) Proclaim(ctx context.Context, val string) error {
- if e.leaderSession == nil {
- return ErrElectionNotLeader
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- txn := client.Txn(ctx).If(cmp)
- txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
- tresp, terr := txn.Commit()
- if terr != nil {
- return terr
- }
- if !tresp.Succeeded {
- e.leaderKey = ""
- return ErrElectionNotLeader
- }
-
- e.hdr = tresp.Header
- return nil
-}
-
-// Resign lets a leader start a new election.
-func (e *Election) Resign(ctx context.Context) (err error) {
- if e.leaderSession == nil {
- return nil
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
- if err == nil {
- e.hdr = resp.Header
- }
- e.leaderKey = ""
- e.leaderSession = nil
- return err
-}
-
-// Leader returns the leader value for the current election.
-func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
- client := e.session.Client()
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return nil, err
- } else if len(resp.Kvs) == 0 {
- // no leader currently elected
- return nil, ErrElectionNoLeader
- }
- return resp, nil
-}
-
-// Observe returns a channel that reliably observes ordered leader proposals
-// as GetResponse values on every current elected leader key. It will not
-// necessarily fetch all historical leader updates, but will always post the
-// most recent leader value.
-//
-// The channel closes when the context is canceled or the underlying watcher
-// is otherwise disrupted.
-func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
- retc := make(chan v3.GetResponse)
- go e.observe(ctx, retc)
- return retc
-}
-
-func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
- client := e.session.Client()
-
- defer close(ch)
- for {
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return
- }
-
- var kv *mvccpb.KeyValue
- var hdr *pb.ResponseHeader
-
- if len(resp.Kvs) == 0 {
- cctx, cancel := context.WithCancel(ctx)
- // wait for first key put on prefix
- opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
- wch := client.Watch(cctx, e.keyPrefix, opts...)
- for kv == nil {
- wr, ok := <-wch
- if !ok || wr.Err() != nil {
- cancel()
- return
- }
- // only accept puts; a delete will make observe() spin
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.PUT {
- hdr, kv = &wr.Header, ev.Kv
- // may have multiple revs; hdr.rev = the last rev
- // set to kv's rev in case batch has multiple Puts
- hdr.Revision = kv.ModRevision
- break
- }
- }
- }
- cancel()
- } else {
- hdr, kv = resp.Header, resp.Kvs[0]
- }
-
- select {
- case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
- case <-ctx.Done():
- return
- }
-
- cctx, cancel := context.WithCancel(ctx)
- wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
- keyDeleted := false
- for !keyDeleted {
- wr, ok := <-wch
- if !ok {
- cancel()
- return
- }
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- keyDeleted = true
- break
- }
- resp.Header = &wr.Header
- resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
- select {
- case ch <- *resp:
- case <-cctx.Done():
- cancel()
- return
- }
- }
- }
- cancel()
- }
-}
-
-// Key returns the leader key if elected, empty string otherwise.
-func (e *Election) Key() string { return e.leaderKey }
-
-// Rev returns the leader key's creation revision, if elected.
-func (e *Election) Rev() int64 { return e.leaderRev }
-
-// Header is the response header from the last successful election proposal.
-func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
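A minimal sketch of running an election with the API above, assuming an existing `*clientv3.Client`; the prefix and candidate value are illustrative:

```go
package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

// runForLeader campaigns on an election prefix and resigns when ctx is done.
func runForLeader(ctx context.Context, cli *clientv3.Client) error {
	// A Session keeps a lease alive; the campaign key is attached to it so
	// leadership is released if this process dies.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election")
	if err := e.Campaign(ctx, "candidate-1"); err != nil {
		return err
	}
	log.Printf("elected with key %s at revision %d", e.Key(), e.Rev())

	<-ctx.Done()
	// Resign deletes the leader key so the next campaigner can take over.
	return e.Resign(context.Background())
}
```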
diff --git a/client/v3/concurrency/example_election_test.go b/client/v3/concurrency/example_election_test.go
deleted file mode 120000
index a76f0a7f4ef..00000000000
--- a/client/v3/concurrency/example_election_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../../tests/integration/clientv3/concurrency/example_election_test.go
\ No newline at end of file
diff --git a/client/v3/concurrency/example_mutex_test.go b/client/v3/concurrency/example_mutex_test.go
deleted file mode 120000
index 053eb74ad6a..00000000000
--- a/client/v3/concurrency/example_mutex_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../../tests/integration/clientv3/concurrency/example_mutex_test.go
\ No newline at end of file
diff --git a/client/v3/concurrency/example_stm_test.go b/client/v3/concurrency/example_stm_test.go
deleted file mode 120000
index d63639ecc68..00000000000
--- a/client/v3/concurrency/example_stm_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../../tests/integration/clientv3/concurrency/example_stm_test.go
\ No newline at end of file
diff --git a/client/v3/concurrency/key.go b/client/v3/concurrency/key.go
deleted file mode 100644
index 8a5d6e1f175..00000000000
--- a/client/v3/concurrency/key.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "errors"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
-)
-
-func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- var wr v3.WatchResponse
- wch := client.Watch(cctx, key, v3.WithRev(rev))
- for wr = range wch {
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- return nil
- }
- }
- }
- if err := wr.Err(); err != nil {
- return err
- }
- if err := ctx.Err(); err != nil {
- return err
- }
- return errors.New("lost watcher waiting for delete")
-}
-
-// waitDeletes efficiently waits until all keys matching the prefix and with a
-// create revision no greater than maxCreateRev are deleted.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
- getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
- for {
- resp, err := client.Get(ctx, pfx, getOpts...)
- if err != nil {
- return nil, err
- }
- if len(resp.Kvs) == 0 {
- return resp.Header, nil
- }
- lastKey := string(resp.Kvs[0].Key)
- if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
- return nil, err
- }
- }
-}
diff --git a/client/v3/concurrency/main_test.go b/client/v3/concurrency/main_test.go
deleted file mode 100644
index d8819be04d2..00000000000
--- a/client/v3/concurrency/main_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency_test
-
-import (
- "testing"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func exampleEndpoints() []string { return nil }
-
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
- mocking()
- // TODO: Call 'example' when mocking() provides realistic mocking of transport.
-
- // The real testing logic of examples gets executed
- // as part of ./tests/integration/clientv3/integration/...
-}
-
-func TestMain(m *testing.M) {
- testutil.MustTestMainWithLeakDetection(m)
-}
diff --git a/client/v3/concurrency/mutex.go b/client/v3/concurrency/mutex.go
deleted file mode 100644
index 7080f0b08dd..00000000000
--- a/client/v3/concurrency/mutex.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- v3 "go.etcd.io/etcd/client/v3"
-)
-
-// ErrLocked is returned by TryLock when Mutex is already locked by another session.
-var ErrLocked = errors.New("mutex: Locked by another session")
-var ErrSessionExpired = errors.New("mutex: session is expired")
-var ErrLockReleased = errors.New("mutex: lock has already been released")
-
-// Mutex implements the sync Locker interface with etcd
-type Mutex struct {
- s *Session
-
- pfx string
- myKey string
- myRev int64
- hdr *pb.ResponseHeader
-}
-
-func NewMutex(s *Session, pfx string) *Mutex {
- return &Mutex{s, pfx + "/", "", -1, nil}
-}
-
-// TryLock locks the mutex if not already locked by another session.
-// If the lock is held by another session, it returns immediately after attempting the necessary cleanup.
-// The ctx argument is used for the sending/receiving Txn RPC.
-func (m *Mutex) TryLock(ctx context.Context) error {
- resp, err := m.tryAcquire(ctx)
- if err != nil {
- return err
- }
- // if there is no key on the prefix, or the minimum revision belongs to our key, we already hold the lock
- ownerKey := resp.Responses[1].GetResponseRange().Kvs
- if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
- m.hdr = resp.Header
- return nil
- }
- client := m.s.Client()
- // Cannot lock, so delete the key
- if _, err := client.Delete(ctx, m.myKey); err != nil {
- return err
- }
- m.myKey = "\x00"
- m.myRev = -1
- return ErrLocked
-}
-
-// Lock locks the mutex with a cancelable context. If the context is canceled
-// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
-func (m *Mutex) Lock(ctx context.Context) error {
- resp, err := m.tryAcquire(ctx)
- if err != nil {
- return err
- }
- // if there is no key on the prefix, or the minimum revision belongs to our key, we already hold the lock
- ownerKey := resp.Responses[1].GetResponseRange().Kvs
- if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
- m.hdr = resp.Header
- return nil
- }
- client := m.s.Client()
- // wait for deletion revisions prior to myKey
- // TODO: early termination if the session key is deleted before other session keys with smaller revisions.
- _, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
- // release lock key if wait failed
- if werr != nil {
- m.Unlock(client.Ctx())
- return werr
- }
-
- // make sure the session is not expired, and the owner key still exists.
- gresp, werr := client.Get(ctx, m.myKey)
- if werr != nil {
- m.Unlock(client.Ctx())
- return werr
- }
-
- if len(gresp.Kvs) == 0 { // is the session key lost?
- return ErrSessionExpired
- }
- m.hdr = gresp.Header
-
- return nil
-}
-
-func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
- s := m.s
- client := m.s.Client()
-
- m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
- cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
- // put self in lock waiters via myKey; oldest waiter holds lock
- put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
- // reuse key in case this session already holds the lock
- get := v3.OpGet(m.myKey)
- // fetch current holder to complete uncontended path with only one RPC
- getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
- resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
- if err != nil {
- return nil, err
- }
- m.myRev = resp.Header.Revision
- if !resp.Succeeded {
- m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
- }
- return resp, nil
-}
-
-func (m *Mutex) Unlock(ctx context.Context) error {
- if m.myKey == "" || m.myRev <= 0 || m.myKey == "\x00" {
- return ErrLockReleased
- }
-
- if !strings.HasPrefix(m.myKey, m.pfx) {
- return fmt.Errorf("invalid key %q, it should have prefix %q", m.myKey, m.pfx)
- }
-
- client := m.s.Client()
- if _, err := client.Delete(ctx, m.myKey); err != nil {
- return err
- }
- m.myKey = "\x00"
- m.myRev = -1
- return nil
-}
-
-func (m *Mutex) IsOwner() v3.Cmp {
- return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
-}
-
-func (m *Mutex) Key() string { return m.myKey }
-
-// Header is the response header received from etcd on acquiring the lock.
-func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
-
-type lockerMutex struct{ *Mutex }
-
-func (lm *lockerMutex) Lock() {
- client := lm.s.Client()
- if err := lm.Mutex.Lock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-func (lm *lockerMutex) Unlock() {
- client := lm.s.Client()
- if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-
-// NewLocker creates a sync.Locker backed by an etcd mutex.
-func NewLocker(s *Session, pfx string) sync.Locker {
- return &lockerMutex{NewMutex(s, pfx)}
-}
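
The session/mutex API removed above is small; below is a minimal, self-contained sketch of how it is normally used. The endpoint "localhost:2379" and the "/my-lock/" prefix are illustrative, not taken from this repository.

package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// A Session keeps a lease alive; the mutex key is attached to that lease.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock/")

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := m.Lock(ctx); err != nil {
		log.Fatal(err) // another session holds the lock and the context expired
	}
	// ... critical section ...
	if err := m.Unlock(context.Background()); err != nil {
		log.Fatal(err)
	}
}
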
diff --git a/client/v3/config.go b/client/v3/config.go
deleted file mode 100644
index 4a26714a864..00000000000
--- a/client/v3/config.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "crypto/tls"
- "time"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
-
- "go.etcd.io/etcd/client/pkg/v3/transport"
-)
-
-type Config struct {
- // Endpoints is a list of URLs.
- Endpoints []string `json:"endpoints"`
-
- // AutoSyncInterval is the interval to update endpoints with its latest members.
- // 0 disables auto-sync. By default auto-sync is disabled.
- AutoSyncInterval time.Duration `json:"auto-sync-interval"`
-
- // DialTimeout is the timeout for failing to establish a connection.
- DialTimeout time.Duration `json:"dial-timeout"`
-
- // DialKeepAliveTime is the time after which client pings the server to see if
- // transport is alive.
- DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
-
- // DialKeepAliveTimeout is the time that the client waits for a response for the
- // keep-alive probe. If the response is not received in this time, the connection is closed.
- DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
-
- // MaxCallSendMsgSize is the client-side request send limit in bytes.
- // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
- // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
- MaxCallSendMsgSize int
-
- // MaxCallRecvMsgSize is the client-side response receive limit.
- // If 0, it defaults to "math.MaxInt32", because range response can
- // easily exceed request send limits.
- // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
- // ("--max-recv-bytes" flag to etcd).
- MaxCallRecvMsgSize int
-
- // TLS holds the client secure credentials, if any.
- TLS *tls.Config
-
- // Username is a user name for authentication.
- Username string `json:"username"`
-
- // Password is a password for authentication.
- Password string `json:"password"`
-
- // RejectOldCluster when set will refuse to create a client against an outdated cluster.
- RejectOldCluster bool `json:"reject-old-cluster"`
-
- // DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
- // For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
-	// Without this, Dial returns immediately and the connection to the server is established in the background.
- DialOptions []grpc.DialOption
-
- // Context is the default client context; it can be used to cancel grpc dial out and
- // other operations that do not have an explicit context.
- Context context.Context
-
- // Logger sets client-side logger.
- // If nil, fallback to building LogConfig.
- Logger *zap.Logger
-
- // LogConfig configures client-side logger.
- // If nil, use the default logger.
- // TODO: configure gRPC logger
- LogConfig *zap.Config
-
-	// PermitWithoutStream when set will allow the client to send keepalive pings to the server without any active streams (RPCs).
- PermitWithoutStream bool `json:"permit-without-stream"`
-
- // TODO: support custom balancer picker
-}
-
-// ConfigSpec is the configuration from users, which comes from command-line flags,
-// environment variables or config file. It is a fully declarative configuration,
-// and can be serialized & deserialized to/from JSON.
-type ConfigSpec struct {
- Endpoints []string `json:"endpoints"`
- RequestTimeout time.Duration `json:"request-timeout"`
- DialTimeout time.Duration `json:"dial-timeout"`
- KeepAliveTime time.Duration `json:"keepalive-time"`
- KeepAliveTimeout time.Duration `json:"keepalive-timeout"`
- Secure *SecureConfig `json:"secure"`
- Auth *AuthConfig `json:"auth"`
-}
-
-type SecureConfig struct {
- Cert string `json:"cert"`
- Key string `json:"key"`
- Cacert string `json:"cacert"`
- ServerName string `json:"server-name"`
-
- InsecureTransport bool `json:"insecure-transport"`
- InsecureSkipVerify bool `json:"insecure-skip-tls-verify"`
-}
-
-type AuthConfig struct {
- Username string `json:"username"`
- Password string `json:"password"`
-}
-
-func (cfg AuthConfig) Empty() bool {
- return cfg.Username == "" && cfg.Password == ""
-}
-
-// NewClientConfig creates a Config based on the provided ConfigSpec.
-func NewClientConfig(confSpec *ConfigSpec, lg *zap.Logger) (*Config, error) {
- tlsCfg, err := newTLSConfig(confSpec.Secure, lg)
- if err != nil {
- return nil, err
- }
-
- cfg := &Config{
- Endpoints: confSpec.Endpoints,
- DialTimeout: confSpec.DialTimeout,
- DialKeepAliveTime: confSpec.KeepAliveTime,
- DialKeepAliveTimeout: confSpec.KeepAliveTimeout,
- TLS: tlsCfg,
- }
-
- if confSpec.Auth != nil {
- cfg.Username = confSpec.Auth.Username
- cfg.Password = confSpec.Auth.Password
- }
-
- return cfg, nil
-}
-
-func newTLSConfig(scfg *SecureConfig, lg *zap.Logger) (*tls.Config, error) {
- var (
- tlsCfg *tls.Config
- err error
- )
-
- if scfg == nil {
- return nil, nil
- }
-
- if scfg.Cert != "" || scfg.Key != "" || scfg.Cacert != "" || scfg.ServerName != "" {
- cfgtls := &transport.TLSInfo{
- CertFile: scfg.Cert,
- KeyFile: scfg.Key,
- TrustedCAFile: scfg.Cacert,
- ServerName: scfg.ServerName,
- Logger: lg,
- }
- if tlsCfg, err = cfgtls.ClientConfig(); err != nil {
- return nil, err
- }
- }
-
-	// If no key/cert is given but the user wants a secure connection, we
-	// should still set up an empty TLS configuration for gRPC to establish
-	// a secure connection.
- if tlsCfg == nil && !scfg.InsecureTransport {
- tlsCfg = &tls.Config{}
- }
-
- // If the user wants to skip TLS verification then we should set
- // the InsecureSkipVerify flag in tls configuration.
- if scfg.InsecureSkipVerify {
- if tlsCfg == nil {
- tlsCfg = &tls.Config{}
- }
- tlsCfg.InsecureSkipVerify = scfg.InsecureSkipVerify
- }
-
- return tlsCfg, nil
-}
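
A hedged sketch of how the removed ConfigSpec/NewClientConfig pair turns a declarative spec into a dialable Config; the endpoint, certificate paths, and credentials below are placeholders, and the logger helper is the same logutil.CreateDefaultZapLogger used by the test file that follows.

package main

import (
	"log"
	"time"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Fully declarative spec, e.g. decoded from flags, env vars, or a JSON file.
	spec := &clientv3.ConfigSpec{
		Endpoints:        []string{"https://127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout:      2 * time.Second,
		KeepAliveTime:    2 * time.Second,
		KeepAliveTimeout: 6 * time.Second,
		Secure: &clientv3.SecureConfig{
			Cert:   "client.crt", // placeholder paths
			Key:    "client.key",
			Cacert: "ca.crt",
		},
		Auth: &clientv3.AuthConfig{Username: "root", Password: "changeme"},
	}

	lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
	if err != nil {
		log.Fatal(err)
	}

	// NewClientConfig resolves the TLS material and copies the rest into a Config.
	cfg, err := clientv3.NewClientConfig(spec, lg)
	if err != nil {
		log.Fatal(err)
	}

	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
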
diff --git a/client/v3/config_test.go b/client/v3/config_test.go
deleted file mode 100644
index a99c3fd5864..00000000000
--- a/client/v3/config_test.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "crypto/tls"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "go.uber.org/zap"
-
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/client/pkg/v3/transport"
-)
-
-func TestNewClientConfig(t *testing.T) {
- cases := []struct {
- name string
- spec ConfigSpec
- expectedConf Config
- }{
- {
- name: "only has basic info",
- spec: ConfigSpec{
- Endpoints: []string{"http://192.168.0.10:2379"},
- DialTimeout: 2 * time.Second,
- KeepAliveTime: 3 * time.Second,
- KeepAliveTimeout: 5 * time.Second,
- },
- expectedConf: Config{
- Endpoints: []string{"http://192.168.0.10:2379"},
- DialTimeout: 2 * time.Second,
- DialKeepAliveTime: 3 * time.Second,
- DialKeepAliveTimeout: 5 * time.Second,
- },
- },
- {
- name: "auth enabled",
- spec: ConfigSpec{
- Endpoints: []string{"http://192.168.0.12:2379"},
- DialTimeout: 1 * time.Second,
- KeepAliveTime: 4 * time.Second,
- KeepAliveTimeout: 6 * time.Second,
- Auth: &AuthConfig{
- Username: "test",
- Password: "changeme",
- },
- },
- expectedConf: Config{
- Endpoints: []string{"http://192.168.0.12:2379"},
- DialTimeout: 1 * time.Second,
- DialKeepAliveTime: 4 * time.Second,
- DialKeepAliveTimeout: 6 * time.Second,
- Username: "test",
- Password: "changeme",
- },
- },
- {
- name: "default secure transport",
- spec: ConfigSpec{
- Endpoints: []string{"http://192.168.0.10:2379"},
- DialTimeout: 2 * time.Second,
- KeepAliveTime: 3 * time.Second,
- KeepAliveTimeout: 5 * time.Second,
- Secure: &SecureConfig{
- InsecureTransport: false,
- },
- },
- expectedConf: Config{
- Endpoints: []string{"http://192.168.0.10:2379"},
- DialTimeout: 2 * time.Second,
- DialKeepAliveTime: 3 * time.Second,
- DialKeepAliveTimeout: 5 * time.Second,
- TLS: &tls.Config{},
- },
- },
- {
- name: "default secure transport and skip TLS verification",
- spec: ConfigSpec{
- Endpoints: []string{"http://192.168.0.13:2379"},
- DialTimeout: 1 * time.Second,
- KeepAliveTime: 3 * time.Second,
- KeepAliveTimeout: 5 * time.Second,
- Secure: &SecureConfig{
- InsecureTransport: false,
- InsecureSkipVerify: true,
- },
- },
- expectedConf: Config{
- Endpoints: []string{"http://192.168.0.13:2379"},
- DialTimeout: 1 * time.Second,
- DialKeepAliveTime: 3 * time.Second,
- DialKeepAliveTimeout: 5 * time.Second,
- TLS: &tls.Config{
- InsecureSkipVerify: true,
- },
- },
- },
- {
- name: "insecure transport and skip TLS verification",
- spec: ConfigSpec{
- Endpoints: []string{"http://192.168.0.13:2379"},
- DialTimeout: 1 * time.Second,
- KeepAliveTime: 3 * time.Second,
- KeepAliveTimeout: 5 * time.Second,
- Secure: &SecureConfig{
- InsecureTransport: true,
- InsecureSkipVerify: true,
- },
- },
- expectedConf: Config{
- Endpoints: []string{"http://192.168.0.13:2379"},
- DialTimeout: 1 * time.Second,
- DialKeepAliveTime: 3 * time.Second,
- DialKeepAliveTimeout: 5 * time.Second,
- TLS: &tls.Config{
- InsecureSkipVerify: true,
- },
- },
- },
- }
-
- for _, tc := range cases {
- t.Run(tc.name, func(t *testing.T) {
- lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
-
- cfg, err := NewClientConfig(&tc.spec, lg)
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
-
- assert.Equal(t, tc.expectedConf, *cfg)
- })
- }
-}
-
-func TestNewClientConfigWithSecureCfg(t *testing.T) {
- tls, err := transport.SelfCert(zap.NewNop(), t.TempDir(), []string{"localhost"}, 1)
- if err != nil {
- t.Fatalf("Unexpected error: %v", err)
- }
-
- scfg := &SecureConfig{
- Cert: tls.CertFile,
- Key: tls.KeyFile,
- Cacert: tls.TrustedCAFile,
- }
-
- cfg, err := NewClientConfig(&ConfigSpec{
- Endpoints: []string{"http://192.168.0.13:2379"},
- DialTimeout: 2 * time.Second,
- KeepAliveTime: 3 * time.Second,
- KeepAliveTimeout: 5 * time.Second,
- Secure: scfg,
- }, nil)
- if err != nil || cfg == nil || cfg.TLS == nil {
- t.Fatalf("Unexpected result client config: %v", err)
- }
-}
diff --git a/client/v3/credentials/credentials_test.go b/client/v3/credentials/credentials_test.go
deleted file mode 100644
index 5111a2ad5ec..00000000000
--- a/client/v3/credentials/credentials_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "context"
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-)
-
-func TestUpdateAuthToken(t *testing.T) {
- bundle := NewBundle(Config{})
- ctx := context.TODO()
-
- metadataBeforeUpdate, _ := bundle.PerRPCCredentials().GetRequestMetadata(ctx)
- assert.Empty(t, metadataBeforeUpdate)
-
- bundle.UpdateAuthToken("abcdefg")
-
- metadataAfterUpdate, _ := bundle.PerRPCCredentials().GetRequestMetadata(ctx)
- assert.Equal(t, metadataAfterUpdate[rpctypes.TokenFieldNameGRPC], "abcdefg")
-}
diff --git a/client/v3/ctx_test.go b/client/v3/ctx_test.go
deleted file mode 100644
index 097b6a3e6b1..00000000000
--- a/client/v3/ctx_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "reflect"
- "testing"
-
- "google.golang.org/grpc/metadata"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/api/v3/version"
-)
-
-func TestMetadataWithRequireLeader(t *testing.T) {
- ctx := context.TODO()
- _, ok := metadata.FromOutgoingContext(ctx)
- if ok {
- t.Fatal("expected no outgoing metadata ctx key")
- }
-
- // add a conflicting key with some other value
- md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, "invalid")
- // add a key, and expect not be overwritten
- md.Set("hello", "1", "2")
- ctx = metadata.NewOutgoingContext(ctx, md)
-
- // expect overwrites but still keep other keys
- ctx = WithRequireLeader(ctx)
- md, ok = metadata.FromOutgoingContext(ctx)
- if !ok {
- t.Fatal("expected outgoing metadata ctx key")
- }
- if ss := md.Get(rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) {
- t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
- }
- if ss := md.Get("hello"); !reflect.DeepEqual(ss, []string{"1", "2"}) {
- t.Fatalf("unexpected metadata for 'hello' %v", ss)
- }
-}
-
-func TestMetadataWithClientAPIVersion(t *testing.T) {
- ctx := withVersion(WithRequireLeader(context.TODO()))
-
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- t.Fatal("expected outgoing metadata ctx key")
- }
- if ss := md.Get(rpctypes.MetadataRequireLeaderKey); !reflect.DeepEqual(ss, []string{rpctypes.MetadataHasLeader}) {
- t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataRequireLeaderKey, ss)
- }
- if ss := md.Get(rpctypes.MetadataClientAPIVersionKey); !reflect.DeepEqual(ss, []string{version.APIVersion}) {
- t.Fatalf("unexpected metadata for %q %v", rpctypes.MetadataClientAPIVersionKey, ss)
- }
-}
diff --git a/client/v3/doc.go b/client/v3/doc.go
deleted file mode 100644
index bd820d3d79e..00000000000
--- a/client/v3/doc.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package clientv3 implements the official Go etcd client for v3.
-//
-// Create client using `clientv3.New`:
-//
-// // expect dial time-out on ipv4 blackhole
-// _, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"http://254.0.0.1:12345"},
-// DialTimeout: 2 * time.Second,
-// })
-//
-// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
-// if err == context.DeadlineExceeded {
-// // handle errors
-// }
-//
-// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
-// if err == grpc.ErrClientConnTimeout {
-// // handle errors
-// }
-//
-// cli, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
-// DialTimeout: 5 * time.Second,
-// })
-// if err != nil {
-// // handle error!
-// }
-// defer cli.Close()
-//
-// Make sure to close the client after using it. If the client is not closed, the
-// connection will have leaky goroutines.
-//
-// To specify a client request timeout, wrap the context with context.WithTimeout:
-//
-// ctx, cancel := context.WithTimeout(context.Background(), timeout)
-// defer cancel()
-// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
-// if err != nil {
-// // handle error!
-// }
-// // use the response
-//
-// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
-// Clients are safe for concurrent use by multiple goroutines.
-//
-// etcd client returns 2 types of errors:
-//
-// 1. context error: canceled or deadline exceeded.
-// 2. gRPC error: e.g. when the clock drifts on the server side before the client's context deadline is exceeded.
-// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
-//
-// Here is the example code to handle client errors:
-//
-// resp, err := kvc.Put(ctx, "", "")
-// if err != nil {
-// if err == context.Canceled {
-// // ctx is canceled by another routine
-// } else if err == context.DeadlineExceeded {
-// // ctx is attached with a deadline and it exceeded
-// } else if err == rpctypes.ErrEmptyKey {
-// // client-side error: key is not provided
-// } else if ev, ok := status.FromError(err); ok {
-// code := ev.Code()
-// if code == codes.DeadlineExceeded {
-// // server-side context might have timed-out first (due to clock skew)
-// // while original client-side context is not timed-out yet
-// }
-// } else {
-// // bad cluster endpoints, which are not etcd servers
-// }
-// }
-//
-// go func() { cli.Close() }()
-// _, err := kvc.Get(ctx, "a")
-// if err != nil {
-// // with etcd clientv3 <= v3.3
-// if err == context.Canceled {
-// // grpc balancer calls 'Get' with an inflight client.Close
-// } else if err == grpc.ErrClientConnClosing { // <= gRPC v1.7.x
-// // grpc balancer calls 'Get' after client.Close.
-// }
-// // with etcd clientv3 >= v3.4
-// if clientv3.IsConnCanceled(err) {
-// // gRPC client connection is closed
-// }
-// }
-//
-// The grpc load balancer is registered statically and is shared across etcd clients.
-// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
-// variable. E.g. "ETCD_CLIENT_DEBUG=1".
-package clientv3
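
The snippets embedded in the package comment above can be pulled together into one runnable form; this is only a sketch of the documented error handling, with a placeholder endpoint and key.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	resp, err := cli.Put(ctx, "sample_key", "sample_value")
	cancel()
	switch {
	case err == nil:
		log.Printf("put succeeded at revision %d", resp.Header.Revision)
	case err == context.Canceled || err == context.DeadlineExceeded:
		log.Printf("context error: %v", err)
	case err == rpctypes.ErrEmptyKey:
		log.Printf("client-side error: %v", err)
	default:
		if ev, ok := status.FromError(err); ok && ev.Code() == codes.DeadlineExceeded {
			log.Printf("server-side deadline exceeded (possible clock skew): %v", err)
		} else {
			log.Printf("bad cluster endpoints or other gRPC error: %v", err)
		}
	}
}
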
diff --git a/client/v3/example_auth_test.go b/client/v3/example_auth_test.go
deleted file mode 120000
index 7a25cc2a033..00000000000
--- a/client/v3/example_auth_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_auth_test.go
\ No newline at end of file
diff --git a/client/v3/example_cluster_test.go b/client/v3/example_cluster_test.go
deleted file mode 120000
index 302451f8113..00000000000
--- a/client/v3/example_cluster_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_cluster_test.go
\ No newline at end of file
diff --git a/client/v3/example_kv_test.go b/client/v3/example_kv_test.go
deleted file mode 120000
index 0b3bd875e92..00000000000
--- a/client/v3/example_kv_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_kv_test.go
\ No newline at end of file
diff --git a/client/v3/example_lease_test.go b/client/v3/example_lease_test.go
deleted file mode 120000
index d1cf744bb6d..00000000000
--- a/client/v3/example_lease_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_lease_test.go
\ No newline at end of file
diff --git a/client/v3/example_maintenance_test.go b/client/v3/example_maintenance_test.go
deleted file mode 120000
index d8bcb642a24..00000000000
--- a/client/v3/example_maintenance_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_maintenance_test.go
\ No newline at end of file
diff --git a/client/v3/example_metrics_test.go b/client/v3/example_metrics_test.go
deleted file mode 120000
index a363c3c4fe0..00000000000
--- a/client/v3/example_metrics_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_metrics_test.go
\ No newline at end of file
diff --git a/client/v3/example_test.go b/client/v3/example_test.go
deleted file mode 120000
index ddacab0d6c4..00000000000
--- a/client/v3/example_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_test.go
\ No newline at end of file
diff --git a/client/v3/example_watch_test.go b/client/v3/example_watch_test.go
deleted file mode 120000
index fb748bed55d..00000000000
--- a/client/v3/example_watch_test.go
+++ /dev/null
@@ -1 +0,0 @@
-../../tests/integration/clientv3/examples/example_watch_test.go
\ No newline at end of file
diff --git a/client/v3/experimental/recipes/client.go b/client/v3/experimental/recipes/client.go
deleted file mode 100644
index 6dd5b13a6a5..00000000000
--- a/client/v3/experimental/recipes/client.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package recipe
-
-import (
- "context"
- "errors"
-
- spb "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
-)
-
-var (
- ErrKeyExists = errors.New("key already exists")
- ErrWaitMismatch = errors.New("unexpected wait result")
- ErrTooManyClients = errors.New("too many clients")
- ErrNoWatcher = errors.New("no watcher channel")
-)
-
-// deleteRevKey deletes a key by revision, returning false if key is missing
-func deleteRevKey(kv v3.KV, key string, rev int64) (bool, error) {
- cmp := v3.Compare(v3.ModRevision(key), "=", rev)
- req := v3.OpDelete(key)
- txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit()
- if err != nil {
- return false, err
- } else if !txnresp.Succeeded {
- return false, nil
- }
- return true, nil
-}
-
-func claimFirstKey(kv v3.KV, kvs []*spb.KeyValue) (*spb.KeyValue, error) {
- for _, k := range kvs {
- ok, err := deleteRevKey(kv, string(k.Key), k.ModRevision)
- if err != nil {
- return nil, err
- } else if ok {
- return k, nil
- }
- }
- return nil, nil
-}
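
deleteRevKey and claimFirstKey are unexported, but the compare-and-delete idiom they rely on can be sketched against the public clientv3 API; the function name deleteExactRev is invented for illustration.

package recipesketch

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// deleteExactRev deletes key only if its ModRevision still equals rev, which is
// what lets several consumers race to claim the same key without double-claims.
func deleteExactRev(ctx context.Context, kv clientv3.KV, key string, rev int64) (bool, error) {
	resp, err := kv.Txn(ctx).
		If(clientv3.Compare(clientv3.ModRevision(key), "=", rev)).
		Then(clientv3.OpDelete(key)).
		Commit()
	if err != nil {
		return false, err
	}
	// Succeeded == false means another client modified or removed the key first.
	return resp.Succeeded, nil
}
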
diff --git a/client/v3/experimental/recipes/double_barrier.go b/client/v3/experimental/recipes/double_barrier.go
deleted file mode 100644
index cc2416db23b..00000000000
--- a/client/v3/experimental/recipes/double_barrier.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package recipe
-
-import (
- "context"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
-)
-
-// DoubleBarrier blocks processes on Enter until an expected count enters, then
-// blocks again on Leave until all processes have left.
-type DoubleBarrier struct {
- s *concurrency.Session
- ctx context.Context
-
- key string // key for the collective barrier
- count int
- myKey *EphemeralKV // current key for this process on the barrier
-}
-
-func NewDoubleBarrier(s *concurrency.Session, key string, count int) *DoubleBarrier {
- return &DoubleBarrier{
- s: s,
- ctx: context.TODO(),
- key: key,
- count: count,
- }
-}
-
-// Enter waits for "count" processes to enter the barrier then returns
-func (b *DoubleBarrier) Enter() error {
- client := b.s.Client()
-
- // Check the entered clients before creating the UniqueEphemeralKey,
- // fail the request if there are already too many clients.
- if resp1, err := b.enteredClients(client); err != nil {
- return err
- } else if len(resp1.Kvs) >= b.count {
- return ErrTooManyClients
- }
-
- ek, err := newUniqueEphemeralKey(b.s, b.key+"/waiters")
- if err != nil {
- return err
- }
- b.myKey = ek
-
- // Check the entered clients after creating the UniqueEphemeralKey
- resp2, err := b.enteredClients(client)
- if err != nil {
- return err
- }
- if len(resp2.Kvs) >= b.count {
- lastWaiter := resp2.Kvs[b.count-1]
- if ek.rev > lastWaiter.CreateRevision {
- // delete itself now, otherwise other processes may need to wait
- // until these keys are automatically deleted when the related
- // lease expires.
- if err = b.myKey.Delete(); err != nil {
- // Nothing to do here. We have to wait for the key to be
- // deleted when the lease expires.
- }
- return ErrTooManyClients
- }
-
- if ek.rev == lastWaiter.CreateRevision {
- // TODO(ahrtr): we might need to compare ek.key and
- // string(lastWaiter.Key), they should be equal.
- // unblock all other waiters
- _, err = client.Put(b.ctx, b.key+"/ready", "")
- return err
- }
- }
-
- _, err = WaitEvents(
- client,
- b.key+"/ready",
- ek.Revision(),
- []mvccpb.Event_EventType{mvccpb.PUT})
- return err
-}
-
-// enteredClients gets all the entered clients, which are ordered by the
-// createRevision in ascending order.
-func (b *DoubleBarrier) enteredClients(cli *clientv3.Client) (*clientv3.GetResponse, error) {
- resp, err := cli.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix(),
- clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend))
- if err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-// Leave waits for "count" processes to leave the barrier then returns
-func (b *DoubleBarrier) Leave() error {
- client := b.s.Client()
- resp, err := client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix())
- if err != nil {
- return err
- }
- if len(resp.Kvs) == 0 {
- return nil
- }
-
- lowest, highest := resp.Kvs[0], resp.Kvs[0]
- for _, k := range resp.Kvs {
- if k.ModRevision < lowest.ModRevision {
- lowest = k
- }
- if k.ModRevision > highest.ModRevision {
- highest = k
- }
- }
- isLowest := string(lowest.Key) == b.myKey.Key()
-
- if len(resp.Kvs) == 1 && isLowest {
- // this is the only node in the barrier; finish up
- if _, err = client.Delete(b.ctx, b.key+"/ready"); err != nil {
- return err
- }
- return b.myKey.Delete()
- }
-
- // this ensures that if a process fails, the ephemeral lease will be
- // revoked, its barrier key is removed, and the barrier can resume
-
- // lowest process in node => wait on highest process
- if isLowest {
- _, err = WaitEvents(
- client,
- string(highest.Key),
- highest.ModRevision,
- []mvccpb.Event_EventType{mvccpb.DELETE})
- if err != nil {
- return err
- }
- return b.Leave()
- }
-
- // delete self and wait on lowest process
- if err = b.myKey.Delete(); err != nil {
- return err
- }
-
- key := string(lowest.Key)
- _, err = WaitEvents(
- client,
- key,
- lowest.ModRevision,
- []mvccpb.Event_EventType{mvccpb.DELETE})
- if err != nil {
- return err
- }
- return b.Leave()
-}
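
A minimal usage sketch for the DoubleBarrier removed above; the barrier key "/barriers/job-42", the participant count of 3, and the endpoint are all illustrative.

package main

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Every worker runs the same three calls; Enter returns once 3 of them arrive,
	// and Leave returns once all 3 have left.
	b := recipe.NewDoubleBarrier(s, "/barriers/job-42", 3)
	if err := b.Enter(); err != nil {
		log.Fatal(err) // e.g. ErrTooManyClients if a 4th worker shows up
	}
	// ... do the coordinated work ...
	if err := b.Leave(); err != nil {
		log.Fatal(err)
	}
}
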
diff --git a/client/v3/experimental/recipes/key.go b/client/v3/experimental/recipes/key.go
deleted file mode 100644
index 10362c18fbe..00000000000
--- a/client/v3/experimental/recipes/key.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package recipe
-
-import (
- "context"
- "fmt"
- "strings"
- "time"
-
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
-)
-
-// RemoteKV is a key/revision pair created by the client and stored on etcd
-type RemoteKV struct {
- kv v3.KV
- key string
- rev int64
- val string
-}
-
-func newKey(kv v3.KV, key string, leaseID v3.LeaseID) (*RemoteKV, error) {
- return newKV(kv, key, "", leaseID)
-}
-
-func newKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (*RemoteKV, error) {
- rev, err := putNewKV(kv, key, val, leaseID)
- if err != nil {
- return nil, err
- }
- return &RemoteKV{kv, key, rev, val}, nil
-}
-
-func newUniqueKV(kv v3.KV, prefix string, val string) (*RemoteKV, error) {
- for {
- newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano())
- rev, err := putNewKV(kv, newKey, val, v3.NoLease)
- if err == nil {
- return &RemoteKV{kv, newKey, rev, val}, nil
- }
- if err != ErrKeyExists {
- return nil, err
- }
- }
-}
-
-// putNewKV attempts to create the given key, only succeeding if the key did
-// not yet exist.
-func putNewKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (int64, error) {
- cmp := v3.Compare(v3.Version(key), "=", 0)
- req := v3.OpPut(key, val, v3.WithLease(leaseID))
- txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit()
- if err != nil {
- return 0, err
- }
- if !txnresp.Succeeded {
- return 0, ErrKeyExists
- }
- return txnresp.Header.Revision, nil
-}
-
-// newSequentialKV allocates a new sequential key <prefix>/nnnnn with a given
-// prefix and value. Note: a bookkeeping node __<prefix> is also allocated.
-func newSequentialKV(kv v3.KV, prefix, val string) (*RemoteKV, error) {
- resp, err := kv.Get(context.TODO(), prefix, v3.WithLastKey()...)
- if err != nil {
- return nil, err
- }
-
- // add 1 to last key, if any
- newSeqNum := 0
- if len(resp.Kvs) != 0 {
- fields := strings.Split(string(resp.Kvs[0].Key), "/")
- _, serr := fmt.Sscanf(fields[len(fields)-1], "%d", &newSeqNum)
- if serr != nil {
- return nil, serr
- }
- newSeqNum++
- }
- newKey := fmt.Sprintf("%s/%016d", prefix, newSeqNum)
-
- // base prefix key must be current (i.e., <=) with the server update;
- // the base key is important to avoid the following:
- // N1: LastKey() == 1, start txn.
- // N2: new Key 2, new Key 3, Delete Key 2
- // N1: txn succeeds allocating key 2 when it shouldn't
- baseKey := "__" + prefix
-
- // current revision might contain modification so +1
- cmp := v3.Compare(v3.ModRevision(baseKey), "<", resp.Header.Revision+1)
- reqPrefix := v3.OpPut(baseKey, "")
- reqnewKey := v3.OpPut(newKey, val)
-
- txn := kv.Txn(context.TODO())
- txnresp, err := txn.If(cmp).Then(reqPrefix, reqnewKey).Commit()
- if err != nil {
- return nil, err
- }
- if !txnresp.Succeeded {
- return newSequentialKV(kv, prefix, val)
- }
- return &RemoteKV{kv, newKey, txnresp.Header.Revision, val}, nil
-}
-
-func (rk *RemoteKV) Key() string { return rk.key }
-func (rk *RemoteKV) Revision() int64 { return rk.rev }
-func (rk *RemoteKV) Value() string { return rk.val }
-
-func (rk *RemoteKV) Delete() error {
- if rk.kv == nil {
- return nil
- }
- _, err := rk.kv.Delete(context.TODO(), rk.key)
- rk.kv = nil
- return err
-}
-
-func (rk *RemoteKV) Put(val string) error {
- _, err := rk.kv.Put(context.TODO(), rk.key, val)
- return err
-}
-
-// EphemeralKV is a new key associated with a session lease
-type EphemeralKV struct{ RemoteKV }
-
-// newEphemeralKV creates a new key/value pair associated with a session lease
-func newEphemeralKV(s *concurrency.Session, key, val string) (*EphemeralKV, error) {
- k, err := newKV(s.Client(), key, val, s.Lease())
- if err != nil {
- return nil, err
- }
- return &EphemeralKV{*k}, nil
-}
-
-// newUniqueEphemeralKey creates a new unique valueless key associated with a session lease
-func newUniqueEphemeralKey(s *concurrency.Session, prefix string) (*EphemeralKV, error) {
- return newUniqueEphemeralKV(s, prefix, "")
-}
-
-// newUniqueEphemeralKV creates a new unique key/value pair associated with a session lease
-func newUniqueEphemeralKV(s *concurrency.Session, prefix, val string) (ek *EphemeralKV, err error) {
- for {
- newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano())
- ek, err = newEphemeralKV(s, newKey, val)
- if err == nil || err != ErrKeyExists {
- break
- }
- }
- return ek, err
-}
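
The create-only transaction that putNewKV builds is the core of every key flavor in this file; here is a small sketch of that idiom written against the public API (the putIfAbsent name is invented for illustration).

package recipesketch

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// putIfAbsent writes key=val only when the key has never been created
// (Version == 0), reporting whether this call created it and at which revision.
func putIfAbsent(ctx context.Context, kv clientv3.KV, key, val string) (created bool, rev int64, err error) {
	resp, err := kv.Txn(ctx).
		If(clientv3.Compare(clientv3.Version(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return false, 0, err
	}
	return resp.Succeeded, resp.Header.Revision, nil
}
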
diff --git a/client/v3/experimental/recipes/queue.go b/client/v3/experimental/recipes/queue.go
deleted file mode 100644
index 9c6b0378e11..00000000000
--- a/client/v3/experimental/recipes/queue.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package recipe
-
-import (
- "context"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
-)
-
-// Queue implements a multi-reader, multi-writer distributed queue.
-type Queue struct {
- client *v3.Client
- ctx context.Context
-
- keyPrefix string
-}
-
-func NewQueue(client *v3.Client, keyPrefix string) *Queue {
- return &Queue{client, context.TODO(), keyPrefix}
-}
-
-func (q *Queue) Enqueue(val string) error {
- _, err := newUniqueKV(q.client, q.keyPrefix, val)
- return err
-}
-
-// Dequeue returns Enqueue()'d elements in FIFO order. If the
-// queue is empty, Dequeue blocks until elements are available.
-func (q *Queue) Dequeue() (string, error) {
- // TODO: fewer round trips by fetching more than one key
- resp, err := q.client.Get(q.ctx, q.keyPrefix, v3.WithFirstRev()...)
- if err != nil {
- return "", err
- }
-
- kv, err := claimFirstKey(q.client, resp.Kvs)
- if err != nil {
- return "", err
- } else if kv != nil {
- return string(kv.Value), nil
- } else if resp.More {
- // missed some items, retry to read in more
- return q.Dequeue()
- }
-
- // nothing yet; wait on elements
- ev, err := WaitPrefixEvents(
- q.client,
- q.keyPrefix,
- resp.Header.Revision,
- []mvccpb.Event_EventType{mvccpb.PUT})
- if err != nil {
- return "", err
- }
-
- ok, err := deleteRevKey(q.client, string(ev.Kv.Key), ev.Kv.ModRevision)
- if err != nil {
- return "", err
- } else if !ok {
- return q.Dequeue()
- }
- return string(ev.Kv.Value), err
-}
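
A usage sketch for the Queue recipe removed above; the "/queue/emails" prefix, the payload, and the endpoint are illustrative.

package main

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	q := recipe.NewQueue(cli, "/queue/emails")
	if err := q.Enqueue("hello"); err != nil {
		log.Fatal(err)
	}
	// Dequeue blocks until an element is available, then claims and returns it.
	v, err := q.Dequeue()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("dequeued:", v)
}
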
diff --git a/client/v3/experimental/recipes/watch.go b/client/v3/experimental/recipes/watch.go
deleted file mode 100644
index 92e7bc648f4..00000000000
--- a/client/v3/experimental/recipes/watch.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package recipe
-
-import (
- "context"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-// WaitEvents waits on a key until it observes the given events and returns the final one.
-func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- wc := c.Watch(ctx, key, clientv3.WithRev(rev))
- if wc == nil {
- return nil, ErrNoWatcher
- }
- return waitEvents(wc, evs), nil
-}
-
-func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
- if wc == nil {
- return nil, ErrNoWatcher
- }
- return waitEvents(wc, evs), nil
-}
-
-func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event {
- i := 0
- for wresp := range wc {
- for _, ev := range wresp.Events {
- if ev.Type == evs[i] {
- i++
- if i == len(evs) {
- return ev
- }
- }
- }
- }
- return nil
-}
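
A sketch of WaitEvents as a caller would see it, blocking until a key goes through a PUT followed by a DELETE; the key, start revision, and endpoint are illustrative.

package main

import (
	"log"

	"go.etcd.io/etcd/api/v3/mvccpb"
	clientv3 "go.etcd.io/etcd/client/v3"
	recipe "go.etcd.io/etcd/client/v3/experimental/recipes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Watch "/locks/owner" from revision 1 and return once a PUT and then a
	// DELETE have been observed on it.
	ev, err := recipe.WaitEvents(cli, "/locks/owner", 1,
		[]mvccpb.Event_EventType{mvccpb.PUT, mvccpb.DELETE})
	if err != nil {
		log.Fatal(err)
	}
	if ev == nil {
		log.Fatal("watch channel closed before both events were seen")
	}
	log.Printf("final event: %s on %s", ev.Type, ev.Kv.Key)
}
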
diff --git a/client/v3/go.mod b/client/v3/go.mod
deleted file mode 100644
index 14ecdc8f239..00000000000
--- a/client/v3/go.mod
+++ /dev/null
@@ -1,54 +0,0 @@
-module go.etcd.io/etcd/client/v3
-
-go 1.19
-
-require (
- github.com/dustin/go-humanize v1.0.1
- github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/prometheus/client_golang v1.14.0
- github.com/stretchr/testify v1.8.1
- go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
- go.uber.org/zap v1.24.0
- google.golang.org/grpc v1.51.0
- sigs.k8s.io/yaml v1.3.0
-)
-
-require (
- github.com/benbjohnson/clock v1.1.0 // indirect
- github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/coreos/go-semver v0.3.1 // indirect
- github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/net v0.4.0 // indirect
- golang.org/x/sys v0.3.0 // indirect
- golang.org/x/text v0.5.0 // indirect
- google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
-)
-
-replace (
- go.etcd.io/etcd/api/v3 => ../../api
- go.etcd.io/etcd/client/pkg/v3 => ../pkg
-)
-
-// Bad imports sometimes cause attempts to pull in that code.
-// This makes the error more explicit.
-replace (
- go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/pkg/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY
-)
diff --git a/client/v3/go.sum b/client/v3/go.sum
deleted file mode 100644
index 2ac2610010b..00000000000
--- a/client/v3/go.sum
+++ /dev/null
@@ -1,554 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
-github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/client/v3/internal/endpoint/endpoint.go b/client/v3/internal/endpoint/endpoint.go
deleted file mode 100644
index f6674235cd9..00000000000
--- a/client/v3/internal/endpoint/endpoint.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package endpoint
-
-import (
- "fmt"
- "net"
- "net/url"
- "path"
- "strings"
-)
-
-type CredsRequirement int
-
-const (
-	// CREDS_REQUIRE - Credentials/certificate required for this type of connection.
- CREDS_REQUIRE CredsRequirement = iota
- // CREDS_DROP - Credentials/certificate not needed and should get ignored.
- CREDS_DROP
-	// CREDS_OPTIONAL - Credentials/certificate might be used if supplied.
- CREDS_OPTIONAL
-)
-
-func extractHostFromHostPort(ep string) string {
- host, _, err := net.SplitHostPort(ep)
- if err != nil {
- return ep
- }
- return host
-}
-
-func extractHostFromPath(pathStr string) string {
- return extractHostFromHostPort(path.Base(pathStr))
-}
-
-// mustSplit2 returns the two values from strings.SplitN(s, sep, 2).
-// If sep is not found, it panics instead.
-func mustSplit2(s, sep string) (string, string) {
- spl := strings.SplitN(s, sep, 2)
- if len(spl) < 2 {
- panic(fmt.Errorf("token '%v' expected to have separator sep: `%v`", s, sep))
- }
- return spl[0], spl[1]
-}
-
-func schemeToCredsRequirement(schema string) CredsRequirement {
- switch schema {
- case "https", "unixs":
- return CREDS_REQUIRE
- case "http":
- return CREDS_DROP
- case "unix":
- // Preserving previous behavior from:
- // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212
- // that likely was a bug due to missing 'fallthrough'.
- // At the same time it seems legit to let the users decide whether they
- // want credential control or not (and 'unixs' schema is not a standard thing).
- return CREDS_OPTIONAL
- case "":
- return CREDS_OPTIONAL
- default:
- return CREDS_OPTIONAL
- }
-}
-
-// translateEndpoint translates endpoint names supported by the etcd server into
-// endpoints supported by grpc, with additional information
-// (server_name for cert validation, requireCreds - whether certs are needed).
-// The main differences:
-//   - etcd supports unixs & https names, as opposed to unix & http, to
-//     distinguish the need to configure certificates.
-//   - etcd supports http(s) names, as opposed to the tcp scheme supported by grpc's dial method.
-// - etcd supports unix(s)://local-file naming schema
-// (as opposed to unix:local-file canonical name used by grpc for current dir files).
-// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
-// is considered serverName - to allow local testing of cert-protected communication.
-//
-// See more:
-// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47
-// - https://golang.org/pkg/net/#Dial
-// - https://github.com/grpc/grpc/blob/master/doc/naming.md
-func translateEndpoint(ep string) (addr string, serverName string, requireCreds CredsRequirement) {
- if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") {
- if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") {
- // absolute path case
- schema, absolutePath := mustSplit2(ep, "://")
- return "unix://" + absolutePath, extractHostFromPath(absolutePath), schemeToCredsRequirement(schema)
- }
- if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") {
- // legacy etcd local path
- schema, localPath := mustSplit2(ep, "://")
- return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema)
- }
- schema, localPath := mustSplit2(ep, ":")
- return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema)
- }
-
- if strings.Contains(ep, "://") {
- url, err := url.Parse(ep)
- if err != nil {
- return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL
- }
- if url.Scheme == "http" || url.Scheme == "https" {
- return url.Host, url.Hostname(), schemeToCredsRequirement(url.Scheme)
- }
- return ep, url.Hostname(), schemeToCredsRequirement(url.Scheme)
- }
- // Handles plain addresses like 10.0.0.44:437.
- return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL
-}
-
-// RequiresCredentials returns whether given endpoint requires
-// credentials/certificates for connection.
-func RequiresCredentials(ep string) CredsRequirement {
- _, _, requireCreds := translateEndpoint(ep)
- return requireCreds
-}
-
-// Interpret parses an endpoint of the form
-// (http|https)://<host>* | (unix|unixs)://<path>
-// and returns the low-level address (supported by 'net') to connect to,
-// and a server name used for x509 certificate matching.
-func Interpret(ep string) (address string, serverName string) {
- addr, serverName, _ := translateEndpoint(ep)
- return addr, serverName
-}
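
A minimal sketch of the endpoint translation removed above; the endpoint values are illustrative, and the expected results follow directly from translateEndpoint (the https branch keeps host:port as the dial address, while the unix:/// branch keeps the absolute path and uses the file name as the server name):

    addr, serverName := endpoint.Interpret("https://etcd.example.com:2379")
    // addr == "etcd.example.com:2379", serverName == "etcd.example.com"

    addr, serverName = endpoint.Interpret("unix:///tmp/etcd.sock")
    // addr == "unix:///tmp/etcd.sock", serverName == "etcd.sock"

The test table in endpoint_test.go, deleted just below, exercises the same mapping across the remaining schemes.
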
diff --git a/client/v3/internal/endpoint/endpoint_test.go b/client/v3/internal/endpoint/endpoint_test.go
deleted file mode 100644
index bc6cd71399c..00000000000
--- a/client/v3/internal/endpoint/endpoint_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package endpoint
-
-import (
- "testing"
-)
-
-func Test_interpret(t *testing.T) {
- tests := []struct {
- endpoint string
- wantAddress string
- wantServerName string
- wantRequiresCreds CredsRequirement
- }{
- {"127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_OPTIONAL},
- {"localhost", "localhost", "localhost", CREDS_OPTIONAL},
- {"localhost:8080", "localhost:8080", "localhost", CREDS_OPTIONAL},
-
- {"unix:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_OPTIONAL},
- {"unix:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_OPTIONAL},
-
- {"unix://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_OPTIONAL},
- {"unix://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_OPTIONAL},
-
- {"unixs:127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_REQUIRE},
- {"unixs:127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE},
- {"unixs://127.0.0.1", "unix:127.0.0.1", "127.0.0.1", CREDS_REQUIRE},
- {"unixs://127.0.0.1:8080", "unix:127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE},
-
- {"http://127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_DROP},
- {"http://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1", CREDS_DROP},
- {"https://127.0.0.1", "127.0.0.1", "127.0.0.1", CREDS_REQUIRE},
- {"https://127.0.0.1:8080", "127.0.0.1:8080", "127.0.0.1", CREDS_REQUIRE},
- {"https://localhost:20000", "localhost:20000", "localhost", CREDS_REQUIRE},
-
- {"unix:///tmp/abc", "unix:///tmp/abc", "abc", CREDS_OPTIONAL},
- {"unixs:///tmp/abc", "unix:///tmp/abc", "abc", CREDS_REQUIRE},
- {"unix:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc", CREDS_OPTIONAL},
- {"unixs:///tmp/abc:1234", "unix:///tmp/abc:1234", "abc", CREDS_REQUIRE},
- {"etcd.io", "etcd.io", "etcd.io", CREDS_OPTIONAL},
- {"http://etcd.io/abc", "etcd.io", "etcd.io", CREDS_DROP},
- {"dns://something-other", "dns://something-other", "something-other", CREDS_OPTIONAL},
-
- {"http://[2001:db8:1f70::999:de8:7648:6e8]:100/", "[2001:db8:1f70::999:de8:7648:6e8]:100", "2001:db8:1f70::999:de8:7648:6e8", CREDS_DROP},
- {"[2001:db8:1f70::999:de8:7648:6e8]:100", "[2001:db8:1f70::999:de8:7648:6e8]:100", "2001:db8:1f70::999:de8:7648:6e8", CREDS_OPTIONAL},
- {"unix:unexpected-file_name#123$456", "unix:unexpected-file_name#123$456", "unexpected-file_name#123$456", CREDS_OPTIONAL},
- }
- for _, tt := range tests {
- t.Run("Interpret_"+tt.endpoint, func(t *testing.T) {
- gotAddress, gotServerName := Interpret(tt.endpoint)
- if gotAddress != tt.wantAddress {
- t.Errorf("Interpret() gotAddress = %v, want %v", gotAddress, tt.wantAddress)
- }
- if gotServerName != tt.wantServerName {
- t.Errorf("Interpret() gotServerName = %v, want %v", gotServerName, tt.wantServerName)
- }
- })
- t.Run("RequiresCredentials_"+tt.endpoint, func(t *testing.T) {
- requiresCreds := RequiresCredentials(tt.endpoint)
- if requiresCreds != tt.wantRequiresCreds {
- t.Errorf("RequiresCredentials() got = %v, want %v", requiresCreds, tt.wantRequiresCreds)
- }
- })
- }
-}
-
-func Test_extractHostFromHostPort(t *testing.T) {
- tests := []struct {
- ep string
- want string
- }{
- {ep: "localhost", want: "localhost"},
- {ep: "localhost:8080", want: "localhost"},
- {ep: "192.158.7.14:8080", want: "192.158.7.14"},
- {ep: "192.158.7.14:8080", want: "192.158.7.14"},
- {ep: "[2001:db8:1f70::999:de8:7648:6e8]", want: "[2001:db8:1f70::999:de8:7648:6e8]"},
- {ep: "[2001:db8:1f70::999:de8:7648:6e8]:100", want: "2001:db8:1f70::999:de8:7648:6e8"},
- }
- for _, tt := range tests {
- t.Run(tt.ep, func(t *testing.T) {
- if got := extractHostFromHostPort(tt.ep); got != tt.want {
- t.Errorf("extractHostFromHostPort() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/client/v3/internal/resolver/resolver.go b/client/v3/internal/resolver/resolver.go
deleted file mode 100644
index b5c9de00786..00000000000
--- a/client/v3/internal/resolver/resolver.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package resolver
-
-import (
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/resolver/manual"
- "google.golang.org/grpc/serviceconfig"
-
- "go.etcd.io/etcd/client/v3/internal/endpoint"
-)
-
-const (
- Schema = "etcd-endpoints"
-)
-
-// EtcdManualResolver is a Resolver (and resolver.Builder) that can be updated
-// using SetEndpoints.
-type EtcdManualResolver struct {
- *manual.Resolver
- endpoints []string
- serviceConfig *serviceconfig.ParseResult
-}
-
-func New(endpoints ...string) *EtcdManualResolver {
- r := manual.NewBuilderWithScheme(Schema)
- return &EtcdManualResolver{Resolver: r, endpoints: endpoints, serviceConfig: nil}
-}
-
-// Build returns the resolver itself, because EtcdManualResolver is both a builder and a resolver.
-func (r *EtcdManualResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
- r.serviceConfig = cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`)
- if r.serviceConfig.Err != nil {
- return nil, r.serviceConfig.Err
- }
- res, err := r.Resolver.Build(target, cc, opts)
- if err != nil {
- return nil, err
- }
- // Populates endpoints stored in r into ClientConn (cc).
- r.updateState()
- return res, nil
-}
-
-func (r *EtcdManualResolver) SetEndpoints(endpoints []string) {
- r.endpoints = endpoints
- r.updateState()
-}
-
-func (r EtcdManualResolver) updateState() {
- if r.CC != nil {
- addresses := make([]resolver.Address, len(r.endpoints))
- for i, ep := range r.endpoints {
- addr, serverName := endpoint.Interpret(ep)
- addresses[i] = resolver.Address{Addr: addr, ServerName: serverName}
- }
- state := resolver.State{
- Addresses: addresses,
- ServiceConfig: r.serviceConfig,
- }
- r.UpdateState(state)
- }
-}
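
A hedged sketch of how the EtcdManualResolver above is typically wired into a gRPC dial; the target string, dial options, and endpoint values are assumptions based on the public grpc-go API rather than anything in this change (and the package is internal, so the import is shown for illustration only):

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"

        epresolver "go.etcd.io/etcd/client/v3/internal/resolver"
    )

    // Build the resolver with the initial endpoints and hand it to grpc.Dial.
    r := epresolver.New("127.0.0.1:2379")
    conn, err := grpc.Dial(
        "etcd-endpoints:///etcd-cluster",                         // scheme must match Schema; the path is illustrative
        grpc.WithResolvers(r),                                    // use this builder instead of a globally registered one
        grpc.WithTransportCredentials(insecure.NewCredentials()), // plaintext, for the sketch only
    )
    if err != nil {
        // handle dial error
    }
    defer conn.Close()

    // Cluster membership changes can later be pushed to the balancer:
    r.SetEndpoints([]string{"127.0.0.1:2379", "127.0.0.1:22379"})

Build installs a round_robin service config and calls updateState, so every endpoint above is first translated through endpoint.Interpret before being handed to the ClientConn.
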
diff --git a/client/v3/kv.go b/client/v3/kv.go
deleted file mode 100644
index f50f9595ce1..00000000000
--- a/client/v3/kv.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- "google.golang.org/grpc"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-)
-
-type (
- CompactResponse pb.CompactionResponse
- PutResponse pb.PutResponse
- GetResponse pb.RangeResponse
- DeleteResponse pb.DeleteRangeResponse
- TxnResponse pb.TxnResponse
-)
-
-type KV interface {
- // Put puts a key-value pair into etcd.
-	// Note that key and value can be plain byte arrays; a string is
-	// an immutable representation of such a byte array.
-	// To get a string from a byte array, do string([]byte{0x10, 0x20}).
- Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
-
- // Get retrieves keys.
- // By default, Get will return the value for "key", if any.
- // When passed WithRange(end), Get will return the keys in the range [key, end).
- // When passed WithFromKey(), Get returns keys greater than or equal to key.
- // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
-	// if the required revision has been compacted, the request will fail with ErrCompacted.
- // When passed WithLimit(limit), the number of returned keys is bounded by limit.
- // When passed WithSort(), the keys will be sorted.
- Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
-
- // Delete deletes a key, or optionally using WithRange(end), [key, end).
- Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
-
- // Compact compacts etcd KV history before the given rev.
- Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
-
- // Do applies a single Op on KV without a transaction.
- // Do is useful when creating arbitrary operations to be issued at a
- // later time; the user can range over the operations, calling Do to
- // execute them. Get/Put/Delete, on the other hand, are best suited
- // for when the operation should be issued at the time of declaration.
- Do(ctx context.Context, op Op) (OpResponse, error)
-
- // Txn creates a transaction.
- Txn(ctx context.Context) Txn
-}
-
-type OpResponse struct {
- put *PutResponse
- get *GetResponse
- del *DeleteResponse
- txn *TxnResponse
-}
-
-func (op OpResponse) Put() *PutResponse { return op.put }
-func (op OpResponse) Get() *GetResponse { return op.get }
-func (op OpResponse) Del() *DeleteResponse { return op.del }
-func (op OpResponse) Txn() *TxnResponse { return op.txn }
-
-func (resp *PutResponse) OpResponse() OpResponse {
- return OpResponse{put: resp}
-}
-func (resp *GetResponse) OpResponse() OpResponse {
- return OpResponse{get: resp}
-}
-func (resp *DeleteResponse) OpResponse() OpResponse {
- return OpResponse{del: resp}
-}
-func (resp *TxnResponse) OpResponse() OpResponse {
- return OpResponse{txn: resp}
-}
-
-type kv struct {
- remote pb.KVClient
- callOpts []grpc.CallOption
-}
-
-func NewKV(c *Client) KV {
- api := &kv{remote: RetryKVClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
- api := &kv{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
- r, err := kv.Do(ctx, OpPut(key, val, opts...))
- return r.put, toErr(ctx, err)
-}
-
-func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
- r, err := kv.Do(ctx, OpGet(key, opts...))
- return r.get, toErr(ctx, err)
-}
-
-func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
- r, err := kv.Do(ctx, OpDelete(key, opts...))
- return r.del, toErr(ctx, err)
-}
-
-func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
- resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*CompactResponse)(resp), err
-}
-
-func (kv *kv) Txn(ctx context.Context) Txn {
- return &txn{
- kv: kv,
- ctx: ctx,
- callOpts: kv.callOpts,
- }
-}
-
-func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
- var err error
- switch op.t {
- case tRange:
- if op.IsSortOptionValid() {
- var resp *pb.RangeResponse
- resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{get: (*GetResponse)(resp)}, nil
- }
- } else {
- err = rpctypes.ErrInvalidSortOption
- }
- case tPut:
- var resp *pb.PutResponse
- r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{put: (*PutResponse)(resp)}, nil
- }
- case tDeleteRange:
- var resp *pb.DeleteRangeResponse
- r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{del: (*DeleteResponse)(resp)}, nil
- }
- case tTxn:
- var resp *pb.TxnResponse
- resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{txn: (*TxnResponse)(resp)}, nil
- }
- default:
- panic("Unknown op")
- }
- return OpResponse{}, toErr(ctx, err)
-}
diff --git a/client/v3/lease.go b/client/v3/lease.go
deleted file mode 100644
index 60d7dd18e83..00000000000
--- a/client/v3/lease.go
+++ /dev/null
@@ -1,610 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "sync"
- "time"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
-)
-
-type (
- LeaseRevokeResponse pb.LeaseRevokeResponse
- LeaseID int64
-)
-
-// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
-type LeaseGrantResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
- Error string
-}
-
-// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
-type LeaseKeepAliveResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
-}
-
-// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
-type LeaseTimeToLiveResponse struct {
- *pb.ResponseHeader
- ID LeaseID `json:"id"`
-
-	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease returns -1.
- TTL int64 `json:"ttl"`
-
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- GrantedTTL int64 `json:"granted-ttl"`
-
- // Keys is the list of keys attached to this lease.
- Keys [][]byte `json:"keys"`
-}
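
A short sketch of reading these fields back through TimeToLive. It assumes an existing client cli and uses the clientv3.WithAttachedKeys lease option to have the server populate Keys.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// describeLease prints the remaining and granted TTL of a lease along with
// the keys attached to it. A TTL of -1 means the lease has already expired.
func describeLease(ctx context.Context, cli *clientv3.Client, id clientv3.LeaseID) error {
	resp, err := cli.TimeToLive(ctx, id, clientv3.WithAttachedKeys())
	if err != nil {
		return err
	}
	fmt.Printf("lease %x: ttl=%ds granted=%ds\n", resp.ID, resp.TTL, resp.GrantedTTL)
	for _, k := range resp.Keys {
		fmt.Printf("  attached key: %s\n", k)
	}
	return nil
}
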
-
-// LeaseStatus represents a lease status.
-type LeaseStatus struct {
- ID LeaseID `json:"id"`
- // TODO: TTL int64
-}
-
-// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
-type LeaseLeasesResponse struct {
- *pb.ResponseHeader
- Leases []LeaseStatus `json:"leases"`
-}
-
-const (
- // defaultTTL is the assumed lease TTL used for the first keepalive
- // deadline before the actual TTL is known to the client.
- defaultTTL = 5 * time.Second
- // NoLease is a lease ID for the absence of a lease.
- NoLease LeaseID = 0
-
-	// retryConnWait is how long to wait before retrying a request after an error
- retryConnWait = 500 * time.Millisecond
-)
-
-// LeaseResponseChSize is the size of buffer to store unsent lease responses.
-// WARNING: DO NOT UPDATE.
-// Only for testing purposes.
-var LeaseResponseChSize = 16
-
-// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
-//
-// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
-type ErrKeepAliveHalted struct {
- Reason error
-}
-
-func (e ErrKeepAliveHalted) Error() string {
- s := "etcdclient: leases keep alive halted"
- if e.Reason != nil {
- s += ": " + e.Reason.Error()
- }
- return s
-}
-
-type Lease interface {
- // Grant creates a new lease.
- Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
-
- // Revoke revokes the given lease.
- Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
-
- // TimeToLive retrieves the lease information of the given lease ID.
- TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
-
- // Leases retrieves all leases.
- Leases(ctx context.Context) (*LeaseLeasesResponse, error)
-
- // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted
- // to the channel are not consumed promptly the channel may become full. When full, the lease
- // client will continue sending keep alive requests to the etcd server, but will drop responses
- // until there is capacity on the channel to send more responses.
- //
-	// If the client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or
-	// is canceled by the caller (e.g. context.Canceled), KeepAlive returns an ErrKeepAliveHalted error
-	// containing the error reason.
-	//
-	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
-	// alive stream is interrupted in a way the client cannot handle itself, or if
-	// the given context "ctx" is canceled or timed out.
- //
- // TODO(v4.0): post errors to last keep alive message before closing
- // (see https://github.com/etcd-io/etcd/pull/7866)
- KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
-
- // KeepAliveOnce renews the lease once. The response corresponds to the
- // first message from calling KeepAlive. If the response has a recoverable
- // error, KeepAliveOnce will retry the RPC with a new keep alive message.
- //
-	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
- KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
-
- // Close releases all resources Lease keeps for efficient communication
- // with the etcd server.
- Close() error
-}
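
A minimal sketch of the Grant/KeepAlive flow documented above, assuming an existing client cli; the consumer drains the channel so renewal responses are not dropped. The key and value are placeholders.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// keepLeaseAlive grants a 10-second lease, binds a key to it, and renews the
// lease until ctx is canceled or the keep alive channel is closed.
func keepLeaseAlive(ctx context.Context, cli *clientv3.Client) error {
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, "service/instance", "addr", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err // may be ErrKeepAliveHalted if the renewal loop already stopped
	}
	for ka := range ch {
		fmt.Printf("lease %x renewed, ttl=%d\n", ka.ID, ka.TTL)
	}
	// The channel closes when ctx is canceled or the stream cannot be recovered.
	return ctx.Err()
}
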
-
-type lessor struct {
- mu sync.Mutex // guards all fields
-
- // donec is closed and loopErr is set when recvKeepAliveLoop stops
- donec chan struct{}
- loopErr error
-
- remote pb.LeaseClient
-
- stream pb.Lease_LeaseKeepAliveClient
- streamCancel context.CancelFunc
-
- stopCtx context.Context
- stopCancel context.CancelFunc
-
- keepAlives map[LeaseID]*keepAlive
-
- // firstKeepAliveTimeout is the timeout for the first keepalive request
- // before the actual TTL is known to the lease client
- firstKeepAliveTimeout time.Duration
-
- // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
- firstKeepAliveOnce sync.Once
-
- callOpts []grpc.CallOption
-
- lg *zap.Logger
-}
-
-// keepAlive multiplexes a keepalive for a lease over multiple channels
-type keepAlive struct {
- chs []chan<- *LeaseKeepAliveResponse
- ctxs []context.Context
-	// deadline is the time at which the keep alive channels close if no response is received
- deadline time.Time
- // nextKeepAlive is when to send the next keep alive message
- nextKeepAlive time.Time
- // donec is closed on lease revoke, expiration, or cancel.
- donec chan struct{}
-}
-
-func NewLease(c *Client) Lease {
- return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
-}
-
-func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
- l := &lessor{
- donec: make(chan struct{}),
- keepAlives: make(map[LeaseID]*keepAlive),
- remote: remote,
- firstKeepAliveTimeout: keepAliveTimeout,
- lg: c.lg,
- }
- if l.firstKeepAliveTimeout == time.Second {
- l.firstKeepAliveTimeout = defaultTTL
- }
- if c != nil {
- l.callOpts = c.callOpts
- }
- reqLeaderCtx := WithRequireLeader(context.Background())
- l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
- return l
-}
-
-func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
- r := &pb.LeaseGrantRequest{TTL: ttl}
- resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
- if err == nil {
- gresp := &LeaseGrantResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- Error: resp.Error,
- }
- return gresp, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
- r := &pb.LeaseRevokeRequest{ID: int64(id)}
- resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
- if err == nil {
- return (*LeaseRevokeResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
- r := toLeaseTimeToLiveRequest(id, opts...)
- resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- gresp := &LeaseTimeToLiveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- GrantedTTL: resp.GrantedTTL,
- Keys: resp.Keys,
- }
- return gresp, nil
-}
-
-func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
- resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
- if err == nil {
- leases := make([]LeaseStatus, len(resp.Leases))
- for i := range resp.Leases {
- leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
- }
- return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
- ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
-
- l.mu.Lock()
- // ensure that recvKeepAliveLoop is still running
- select {
- case <-l.donec:
- err := l.loopErr
- l.mu.Unlock()
- close(ch)
- return ch, ErrKeepAliveHalted{Reason: err}
- default:
- }
- ka, ok := l.keepAlives[id]
- if !ok {
- // create fresh keep alive
- ka = &keepAlive{
- chs: []chan<- *LeaseKeepAliveResponse{ch},
- ctxs: []context.Context{ctx},
- deadline: time.Now().Add(l.firstKeepAliveTimeout),
- nextKeepAlive: time.Now(),
- donec: make(chan struct{}),
- }
- l.keepAlives[id] = ka
- } else {
- // add channel and context to existing keep alive
- ka.ctxs = append(ka.ctxs, ctx)
- ka.chs = append(ka.chs, ch)
- }
- l.mu.Unlock()
-
- go l.keepAliveCtxCloser(ctx, id, ka.donec)
- l.firstKeepAliveOnce.Do(func() {
- go l.recvKeepAliveLoop()
- go l.deadlineLoop()
- })
-
- return ch, nil
-}
-
-func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
- for {
- resp, err := l.keepAliveOnce(ctx, id)
- if err == nil {
- if resp.TTL <= 0 {
- err = rpctypes.ErrLeaseNotFound
- }
- return resp, err
- }
- if isHaltErr(ctx, err) {
- return nil, toErr(ctx, err)
- }
- }
-}
-
-func (l *lessor) Close() error {
- l.stopCancel()
- // close for synchronous teardown if stream goroutines never launched
- l.firstKeepAliveOnce.Do(func() { close(l.donec) })
- <-l.donec
- return nil
-}
-
-func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
- select {
- case <-donec:
- return
- case <-l.donec:
- return
- case <-ctx.Done():
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[id]
- if !ok {
- return
- }
-
- // close channel and remove context if still associated with keep alive
- for i, c := range ka.ctxs {
- if c == ctx {
- close(ka.chs[i])
- ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
- ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
- break
- }
- }
-	// remove the keepalive if there are no more listeners
- if len(ka.chs) == 0 {
- delete(l.keepAlives, id)
- }
-}
-
-// closeRequireLeader scans keepAlives for ctxs that have require leader
-// and closes the associated channels.
-func (l *lessor) closeRequireLeader() {
- l.mu.Lock()
- defer l.mu.Unlock()
- for _, ka := range l.keepAlives {
- reqIdxs := 0
- // find all required leader channels, close, mark as nil
- for i, ctx := range ka.ctxs {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- continue
- }
- ks := md[rpctypes.MetadataRequireLeaderKey]
- if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
- continue
- }
- close(ka.chs[i])
- ka.chs[i] = nil
- reqIdxs++
- }
- if reqIdxs == 0 {
- continue
- }
- // remove all channels that required a leader from keepalive
- newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
- newCtxs := make([]context.Context, len(newChs))
- newIdx := 0
- for i := range ka.chs {
- if ka.chs[i] == nil {
- continue
- }
-			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i] // keep each context paired with its channel
- newIdx++
- }
- ka.chs, ka.ctxs = newChs, newCtxs
- }
-}
-
-func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- defer func() {
- if err := stream.CloseSend(); err != nil {
- if ferr == nil {
- ferr = toErr(ctx, err)
- }
- return
- }
- }()
-
- err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- resp, rerr := stream.Recv()
- if rerr != nil {
- return nil, toErr(ctx, rerr)
- }
-
- karesp = &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
- return karesp, nil
-}
-
-func (l *lessor) recvKeepAliveLoop() (gerr error) {
- defer func() {
- l.mu.Lock()
- close(l.donec)
- l.loopErr = gerr
- for _, ka := range l.keepAlives {
- ka.close()
- }
- l.keepAlives = make(map[LeaseID]*keepAlive)
- l.mu.Unlock()
- }()
-
- for {
- stream, err := l.resetRecv()
- if err != nil {
- l.lg.Warn("error occurred during lease keep alive loop",
- zap.Error(err),
- )
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
- } else {
- for {
- resp, err := stream.Recv()
- if err != nil {
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
-
- if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
- l.closeRequireLeader()
- }
- break
- }
-
- l.recvKeepAlive(resp)
- }
- }
-
- select {
- case <-time.After(retryConnWait):
- case <-l.stopCtx.Done():
- return l.stopCtx.Err()
- }
- }
-}
-
-// resetRecv opens a new lease stream and starts sending keep alive requests.
-func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
- sctx, cancel := context.WithCancel(l.stopCtx)
- stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
- if err != nil {
- cancel()
- return nil, err
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.stream != nil && l.streamCancel != nil {
- l.streamCancel()
- }
-
- l.streamCancel = cancel
- l.stream = stream
-
- go l.sendKeepAliveLoop(stream)
- return stream, nil
-}
-
-// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
-func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
- karesp := &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[karesp.ID]
- if !ok {
- return
- }
-
- if karesp.TTL <= 0 {
- // lease expired; close all keep alive channels
- delete(l.keepAlives, karesp.ID)
- ka.close()
- return
- }
-
- // send update to all channels
- nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
- ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
- for _, ch := range ka.chs {
- select {
- case ch <- karesp:
- default:
- if l.lg != nil {
- l.lg.Warn("lease keepalive response queue is full; dropping response send",
- zap.Int("queue-size", len(ch)),
- zap.Int("queue-capacity", cap(ch)),
- )
- }
- }
- // still advance in order to rate-limit keep-alive sends
- ka.nextKeepAlive = nextKeepAlive
- }
-}
-
-// deadlineLoop reaps any keep alive channels that have not received a response
-// within the lease TTL
-func (l *lessor) deadlineLoop() {
- for {
- select {
- case <-time.After(time.Second):
- case <-l.donec:
- return
- }
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.deadline.Before(now) {
- // waited too long for response; lease may be expired
- ka.close()
- delete(l.keepAlives, id)
- }
- }
- l.mu.Unlock()
- }
-}
-
-// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
-func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
- for {
- var tosend []LeaseID
-
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.nextKeepAlive.Before(now) {
- tosend = append(tosend, id)
- }
- }
- l.mu.Unlock()
-
- for _, id := range tosend {
- r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
- if err := stream.Send(r); err != nil {
- l.lg.Warn("error occurred during lease keep alive request sending",
- zap.Error(err),
- )
- return
- }
- }
-
- select {
- case <-time.After(retryConnWait):
- case <-stream.Context().Done():
- return
- case <-l.donec:
- return
- case <-l.stopCtx.Done():
- return
- }
- }
-}
-
-func (ka *keepAlive) close() {
- close(ka.donec)
- for _, ch := range ka.chs {
- close(ch)
- }
-}
diff --git a/client/v3/leasing/doc.go b/client/v3/leasing/doc.go
deleted file mode 100644
index c38af3562b7..00000000000
--- a/client/v3/leasing/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package leasing serves linearizable reads from a local cache by acquiring
-// exclusive write access to keys through a client-side leasing protocol. This
-// leasing layer can either directly wrap the etcd client or it can be exposed
-// through the etcd grpc proxy server, granting multiple clients write access.
-//
-// First, create a leasing KV from a clientv3.Client 'cli':
-//
-//	lkv, closeLKV, err := leasing.NewKV(cli, "leasing-prefix")
-//	if err != nil {
-//		// handle error
-//	}
-//	defer closeLKV()
-//
-// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's
-// key locally. On the server, the leasing key is stored to "leasing-prefix/abc":
-//
-// resp, err := lkv.Get(context.TODO(), "abc")
-//
-// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime:
-//
-// resp, err = lkv.Get(context.TODO(), "abc")
-//
-// If another leasing client writes to a leased key, then the owner relinquishes its exclusive
-// access, permitting the writer to modify the key:
-//
-//	lkv2, closeLKV2, err := leasing.NewKV(cli, "leasing-prefix")
-//	if err != nil {
-//		// handle error
-//	}
-//	defer closeLKV2()
-//	lkv2.Put(context.TODO(), "abc", "456")
-//	resp, err = lkv.Get(context.TODO(), "abc")
-package leasing
diff --git a/client/v3/leasing/kv.go b/client/v3/leasing/kv.go
deleted file mode 100644
index f0cded20fea..00000000000
--- a/client/v3/leasing/kv.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasing
-
-import (
- "context"
- "strings"
- "sync"
- "time"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-type leasingKV struct {
- cl *v3.Client
- kv v3.KV
- pfx string
- leases leaseCache
-
- ctx context.Context
- cancel context.CancelFunc
- wg sync.WaitGroup
-
- sessionOpts []concurrency.SessionOption
- session *concurrency.Session
- sessionc chan struct{}
-}
-
-var closedCh chan struct{}
-
-func init() {
- closedCh = make(chan struct{})
- close(closedCh)
-}
-
-// NewKV wraps a KV instance so that all requests are wired through a leasing protocol.
-func NewKV(cl *v3.Client, pfx string, opts ...concurrency.SessionOption) (v3.KV, func(), error) {
- cctx, cancel := context.WithCancel(cl.Ctx())
- lkv := &leasingKV{
- cl: cl,
- kv: cl.KV,
- pfx: pfx,
- leases: leaseCache{revokes: make(map[string]time.Time)},
- ctx: cctx,
- cancel: cancel,
- sessionOpts: opts,
- sessionc: make(chan struct{}),
- }
- lkv.wg.Add(2)
- go func() {
- defer lkv.wg.Done()
- lkv.monitorSession()
- }()
- go func() {
- defer lkv.wg.Done()
- lkv.leases.clearOldRevokes(cctx)
- }()
- return lkv, lkv.Close, lkv.waitSession(cctx)
-}
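
A hedged usage sketch of the three-value signature above: the leasing KV, a close function for teardown, and an error. The prefix and key are placeholders.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/leasing"
)

// cachedReads wires a client through the leasing KV so that repeated reads of
// the same key are served from the local cache while the lease is held.
func cachedReads(ctx context.Context, cli *clientv3.Client) error {
	lkv, closeLKV, err := leasing.NewKV(cli, "leasing-prefix/")
	if err != nil {
		return err
	}
	defer closeLKV()

	// The first Get acquires the leasing key; subsequent Gets are served locally.
	for i := 0; i < 3; i++ {
		resp, err := lkv.Get(ctx, "abc")
		if err != nil {
			return err
		}
		fmt.Println("kvs:", resp.Kvs)
	}
	return nil
}
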
-
-func (lkv *leasingKV) Close() {
- lkv.cancel()
- lkv.wg.Wait()
-}
-
-func (lkv *leasingKV) Get(ctx context.Context, key string, opts ...v3.OpOption) (*v3.GetResponse, error) {
- return lkv.get(ctx, v3.OpGet(key, opts...))
-}
-
-func (lkv *leasingKV) Put(ctx context.Context, key, val string, opts ...v3.OpOption) (*v3.PutResponse, error) {
- return lkv.put(ctx, v3.OpPut(key, val, opts...))
-}
-
-func (lkv *leasingKV) Delete(ctx context.Context, key string, opts ...v3.OpOption) (*v3.DeleteResponse, error) {
- return lkv.delete(ctx, v3.OpDelete(key, opts...))
-}
-
-func (lkv *leasingKV) Do(ctx context.Context, op v3.Op) (v3.OpResponse, error) {
- switch {
- case op.IsGet():
- resp, err := lkv.get(ctx, op)
- return resp.OpResponse(), err
- case op.IsPut():
- resp, err := lkv.put(ctx, op)
- return resp.OpResponse(), err
- case op.IsDelete():
- resp, err := lkv.delete(ctx, op)
- return resp.OpResponse(), err
- case op.IsTxn():
- cmps, thenOps, elseOps := op.Txn()
- resp, err := lkv.Txn(ctx).If(cmps...).Then(thenOps...).Else(elseOps...).Commit()
- return resp.OpResponse(), err
- }
- return v3.OpResponse{}, nil
-}
-
-func (lkv *leasingKV) Compact(ctx context.Context, rev int64, opts ...v3.CompactOption) (*v3.CompactResponse, error) {
- return lkv.kv.Compact(ctx, rev, opts...)
-}
-
-func (lkv *leasingKV) Txn(ctx context.Context) v3.Txn {
- return &txnLeasing{Txn: lkv.kv.Txn(ctx), lkv: lkv, ctx: ctx}
-}
-
-func (lkv *leasingKV) monitorSession() {
- for lkv.ctx.Err() == nil {
- if lkv.session != nil {
- select {
- case <-lkv.session.Done():
- case <-lkv.ctx.Done():
- return
- }
- }
- lkv.leases.mu.Lock()
- select {
- case <-lkv.sessionc:
- lkv.sessionc = make(chan struct{})
- default:
- }
- lkv.leases.entries = make(map[string]*leaseKey)
- lkv.leases.mu.Unlock()
-
- s, err := concurrency.NewSession(lkv.cl, lkv.sessionOpts...)
- if err != nil {
- continue
- }
-
- lkv.leases.mu.Lock()
- lkv.session = s
- close(lkv.sessionc)
- lkv.leases.mu.Unlock()
- }
-}
-
-func (lkv *leasingKV) monitorLease(ctx context.Context, key string, rev int64) {
- cctx, cancel := context.WithCancel(lkv.ctx)
- defer cancel()
- for cctx.Err() == nil {
- if rev == 0 {
- resp, err := lkv.kv.Get(ctx, lkv.pfx+key)
- if err != nil {
- continue
- }
- rev = resp.Header.Revision
- if len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) == "REVOKE" {
- lkv.rescind(cctx, key, rev)
- return
- }
- }
- wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
- for resp := range wch {
- for _, ev := range resp.Events {
- if string(ev.Kv.Value) != "REVOKE" {
- continue
- }
- if v3.LeaseID(ev.Kv.Lease) == lkv.leaseID() {
- lkv.rescind(cctx, key, ev.Kv.ModRevision)
- }
- return
- }
- }
- rev = 0
- }
-}
-
-// rescind releases a lease from this client.
-func (lkv *leasingKV) rescind(ctx context.Context, key string, rev int64) {
- if lkv.leases.Evict(key) > rev {
- return
- }
- cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev)
- op := v3.OpDelete(lkv.pfx + key)
- for ctx.Err() == nil {
- if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil {
- return
- }
- }
-}
-
-func (lkv *leasingKV) waitRescind(ctx context.Context, key string, rev int64) error {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
- wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
- for resp := range wch {
- for _, ev := range resp.Events {
- if ev.Type == v3.EventTypeDelete {
- return ctx.Err()
- }
- }
- }
- return ctx.Err()
-}
-
-func (lkv *leasingKV) tryModifyOp(ctx context.Context, op v3.Op) (*v3.TxnResponse, chan<- struct{}, error) {
- key := string(op.KeyBytes())
- wc, rev := lkv.leases.Lock(key)
- cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)
- resp, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit()
- switch {
- case err != nil:
- lkv.leases.Evict(key)
- fallthrough
- case !resp.Succeeded:
- if wc != nil {
- close(wc)
- }
- return nil, nil, err
- }
- return resp, wc, nil
-}
-
-func (lkv *leasingKV) put(ctx context.Context, op v3.Op) (pr *v3.PutResponse, err error) {
- if err := lkv.waitSession(ctx); err != nil {
- return nil, err
- }
- for ctx.Err() == nil {
- resp, wc, err := lkv.tryModifyOp(ctx, op)
- if err != nil || wc == nil {
- resp, err = lkv.revoke(ctx, string(op.KeyBytes()), op)
- }
- if err != nil {
- return nil, err
- }
- if resp.Succeeded {
- lkv.leases.mu.Lock()
- lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), resp.Header)
- lkv.leases.mu.Unlock()
- pr = (*v3.PutResponse)(resp.Responses[0].GetResponsePut())
- pr.Header = resp.Header
- }
- if wc != nil {
- close(wc)
- }
- if resp.Succeeded {
- return pr, nil
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) acquire(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
- for ctx.Err() == nil {
- if err := lkv.waitSession(ctx); err != nil {
- return nil, err
- }
- lcmp := v3.Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
- resp, err := lkv.kv.Txn(ctx).If(
- v3.Compare(v3.CreateRevision(lkv.pfx+key), "=", 0),
- v3.Compare(lcmp, "=", 0)).
- Then(
- op,
- v3.OpPut(lkv.pfx+key, "", v3.WithLease(lkv.leaseID()))).
- Else(
- op,
- v3.OpGet(lkv.pfx+key),
- ).Commit()
- if err == nil {
- if !resp.Succeeded {
- kvs := resp.Responses[1].GetResponseRange().Kvs
-				// if the txn failed because this client already owns the leasing key, the lease is acquired
- resp.Succeeded = len(kvs) > 0 && v3.LeaseID(kvs[0].Lease) == lkv.leaseID()
- }
- return resp, nil
- }
- // retry if transient error
- if _, ok := err.(rpctypes.EtcdError); ok {
- return nil, err
- }
- if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
- return nil, err
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) get(ctx context.Context, op v3.Op) (*v3.GetResponse, error) {
- do := func() (*v3.GetResponse, error) {
- r, err := lkv.kv.Do(ctx, op)
- return r.Get(), err
- }
- if !lkv.readySession() {
- return do()
- }
-
- if resp, ok := lkv.leases.Get(ctx, op); resp != nil {
- return resp, nil
- } else if !ok || op.IsSerializable() {
- // must be handled by server or can skip linearization
- return do()
- }
-
- key := string(op.KeyBytes())
- if !lkv.leases.MayAcquire(key) {
- resp, err := lkv.kv.Do(ctx, op)
- return resp.Get(), err
- }
-
- resp, err := lkv.acquire(ctx, key, v3.OpGet(key))
- if err != nil {
- return nil, err
- }
- getResp := (*v3.GetResponse)(resp.Responses[0].GetResponseRange())
- getResp.Header = resp.Header
- if resp.Succeeded {
- getResp = lkv.leases.Add(key, getResp, op)
- lkv.wg.Add(1)
- go func() {
- defer lkv.wg.Done()
- lkv.monitorLease(ctx, key, resp.Header.Revision)
- }()
- }
- return getResp, nil
-}
-
-func (lkv *leasingKV) deleteRangeRPC(ctx context.Context, maxLeaseRev int64, key, end string) (*v3.DeleteResponse, error) {
- lkey, lend := lkv.pfx+key, lkv.pfx+end
- resp, err := lkv.kv.Txn(ctx).If(
- v3.Compare(v3.CreateRevision(lkey).WithRange(lend), "<", maxLeaseRev+1),
- ).Then(
- v3.OpGet(key, v3.WithRange(end), v3.WithKeysOnly()),
- v3.OpDelete(key, v3.WithRange(end)),
- ).Commit()
- if err != nil {
- lkv.leases.EvictRange(key, end)
- return nil, err
- }
- if !resp.Succeeded {
- return nil, nil
- }
- for _, kv := range resp.Responses[0].GetResponseRange().Kvs {
- lkv.leases.Delete(string(kv.Key), resp.Header)
- }
- delResp := (*v3.DeleteResponse)(resp.Responses[1].GetResponseDeleteRange())
- delResp.Header = resp.Header
- return delResp, nil
-}
-
-func (lkv *leasingKV) deleteRange(ctx context.Context, op v3.Op) (*v3.DeleteResponse, error) {
- key, end := string(op.KeyBytes()), string(op.RangeBytes())
- for ctx.Err() == nil {
- maxLeaseRev, err := lkv.revokeRange(ctx, key, end)
- if err != nil {
- return nil, err
- }
- wcs := lkv.leases.LockRange(key, end)
- delResp, err := lkv.deleteRangeRPC(ctx, maxLeaseRev, key, end)
- closeAll(wcs)
- if err != nil || delResp != nil {
- return delResp, err
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) delete(ctx context.Context, op v3.Op) (dr *v3.DeleteResponse, err error) {
- if err := lkv.waitSession(ctx); err != nil {
- return nil, err
- }
- if len(op.RangeBytes()) > 0 {
- return lkv.deleteRange(ctx, op)
- }
- key := string(op.KeyBytes())
- for ctx.Err() == nil {
- resp, wc, err := lkv.tryModifyOp(ctx, op)
- if err != nil || wc == nil {
- resp, err = lkv.revoke(ctx, key, op)
- }
- if err != nil {
- // don't know if delete was processed
- lkv.leases.Evict(key)
- return nil, err
- }
- if resp.Succeeded {
- dr = (*v3.DeleteResponse)(resp.Responses[0].GetResponseDeleteRange())
- dr.Header = resp.Header
- lkv.leases.Delete(key, dr.Header)
- }
- if wc != nil {
- close(wc)
- }
- if resp.Succeeded {
- return dr, nil
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) revoke(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
- rev := lkv.leases.Rev(key)
- txn := lkv.kv.Txn(ctx).If(v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)).Then(op)
- resp, err := txn.Else(v3.OpPut(lkv.pfx+key, "REVOKE", v3.WithIgnoreLease())).Commit()
- if err != nil || resp.Succeeded {
- return resp, err
- }
- return resp, lkv.waitRescind(ctx, key, resp.Header.Revision)
-}
-
-func (lkv *leasingKV) revokeRange(ctx context.Context, begin, end string) (int64, error) {
- lkey, lend := lkv.pfx+begin, ""
- if len(end) > 0 {
- lend = lkv.pfx + end
- }
- leaseKeys, err := lkv.kv.Get(ctx, lkey, v3.WithRange(lend))
- if err != nil {
- return 0, err
- }
- return lkv.revokeLeaseKvs(ctx, leaseKeys.Kvs)
-}
-
-func (lkv *leasingKV) revokeLeaseKvs(ctx context.Context, kvs []*mvccpb.KeyValue) (int64, error) {
- maxLeaseRev := int64(0)
- for _, kv := range kvs {
- if rev := kv.CreateRevision; rev > maxLeaseRev {
- maxLeaseRev = rev
- }
- if v3.LeaseID(kv.Lease) == lkv.leaseID() {
- // don't revoke own keys
- continue
- }
- key := strings.TrimPrefix(string(kv.Key), lkv.pfx)
- if _, err := lkv.revoke(ctx, key, v3.OpGet(key)); err != nil {
- return 0, err
- }
- }
- return maxLeaseRev, nil
-}
-
-func (lkv *leasingKV) waitSession(ctx context.Context) error {
- lkv.leases.mu.RLock()
- sessionc := lkv.sessionc
- lkv.leases.mu.RUnlock()
- select {
- case <-sessionc:
- return nil
- case <-lkv.ctx.Done():
- return lkv.ctx.Err()
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func (lkv *leasingKV) readySession() bool {
- lkv.leases.mu.RLock()
- defer lkv.leases.mu.RUnlock()
- if lkv.session == nil {
- return false
- }
- select {
- case <-lkv.session.Done():
- default:
- return true
- }
- return false
-}
-
-func (lkv *leasingKV) leaseID() v3.LeaseID {
- lkv.leases.mu.RLock()
- defer lkv.leases.mu.RUnlock()
- return lkv.session.Lease()
-}
diff --git a/client/v3/leasing/txn.go b/client/v3/leasing/txn.go
deleted file mode 100644
index 30c6aa2e4d7..00000000000
--- a/client/v3/leasing/txn.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasing
-
-import (
- "context"
- "strings"
-
- v3pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- v3 "go.etcd.io/etcd/client/v3"
-)
-
-type txnLeasing struct {
- v3.Txn
- lkv *leasingKV
- ctx context.Context
- cs []v3.Cmp
- opst []v3.Op
- opse []v3.Op
-}
-
-func (txn *txnLeasing) If(cs ...v3.Cmp) v3.Txn {
- txn.cs = append(txn.cs, cs...)
- txn.Txn = txn.Txn.If(cs...)
- return txn
-}
-
-func (txn *txnLeasing) Then(ops ...v3.Op) v3.Txn {
- txn.opst = append(txn.opst, ops...)
- txn.Txn = txn.Txn.Then(ops...)
- return txn
-}
-
-func (txn *txnLeasing) Else(ops ...v3.Op) v3.Txn {
- txn.opse = append(txn.opse, ops...)
- txn.Txn = txn.Txn.Else(ops...)
- return txn
-}
-
-func (txn *txnLeasing) Commit() (*v3.TxnResponse, error) {
- if resp, err := txn.eval(); resp != nil || err != nil {
- return resp, err
- }
- return txn.serverTxn()
-}
-
-func (txn *txnLeasing) eval() (*v3.TxnResponse, error) {
- // TODO: wait on keys in comparisons
- thenOps, elseOps := gatherOps(txn.opst), gatherOps(txn.opse)
- ops := make([]v3.Op, 0, len(thenOps)+len(elseOps))
- ops = append(ops, thenOps...)
- ops = append(ops, elseOps...)
-
- for _, ch := range txn.lkv.leases.NotifyOps(ops) {
- select {
- case <-ch:
- case <-txn.ctx.Done():
- return nil, txn.ctx.Err()
- }
- }
-
- txn.lkv.leases.mu.RLock()
- defer txn.lkv.leases.mu.RUnlock()
- succeeded, ok := txn.lkv.leases.evalCmp(txn.cs)
- if !ok || txn.lkv.leases.header == nil {
- return nil, nil
- }
- if ops = txn.opst; !succeeded {
- ops = txn.opse
- }
-
- resps, ok := txn.lkv.leases.evalOps(ops)
- if !ok {
- return nil, nil
- }
- return &v3.TxnResponse{Header: copyHeader(txn.lkv.leases.header), Succeeded: succeeded, Responses: resps}, nil
-}
-
-// fallback computes the ops to fetch all possible conflicting
-// leasing keys for a list of ops.
-func (txn *txnLeasing) fallback(ops []v3.Op) (fbOps []v3.Op) {
- for _, op := range ops {
- if op.IsGet() {
- continue
- }
- lkey, lend := txn.lkv.pfx+string(op.KeyBytes()), ""
- if len(op.RangeBytes()) > 0 {
- lend = txn.lkv.pfx + string(op.RangeBytes())
- }
- fbOps = append(fbOps, v3.OpGet(lkey, v3.WithRange(lend)))
- }
- return fbOps
-}
-
-func (txn *txnLeasing) guardKeys(ops []v3.Op) (cmps []v3.Cmp) {
- seen := make(map[string]bool)
- for _, op := range ops {
- key := string(op.KeyBytes())
- if op.IsGet() || len(op.RangeBytes()) != 0 || seen[key] {
- continue
- }
- rev := txn.lkv.leases.Rev(key)
- cmps = append(cmps, v3.Compare(v3.CreateRevision(txn.lkv.pfx+key), "<", rev+1))
- seen[key] = true
- }
- return cmps
-}
-
-func (txn *txnLeasing) guardRanges(ops []v3.Op) (cmps []v3.Cmp, err error) {
- for _, op := range ops {
- if op.IsGet() || len(op.RangeBytes()) == 0 {
- continue
- }
-
- key, end := string(op.KeyBytes()), string(op.RangeBytes())
- maxRevLK, err := txn.lkv.revokeRange(txn.ctx, key, end)
- if err != nil {
- return nil, err
- }
-
- opts := append(v3.WithLastRev(), v3.WithRange(end))
- getResp, err := txn.lkv.kv.Get(txn.ctx, key, opts...)
- if err != nil {
- return nil, err
- }
- maxModRev := int64(0)
- if len(getResp.Kvs) > 0 {
- maxModRev = getResp.Kvs[0].ModRevision
- }
-
- noKeyUpdate := v3.Compare(v3.ModRevision(key).WithRange(end), "<", maxModRev+1)
- noLeaseUpdate := v3.Compare(
- v3.CreateRevision(txn.lkv.pfx+key).WithRange(txn.lkv.pfx+end),
- "<",
- maxRevLK+1)
- cmps = append(cmps, noKeyUpdate, noLeaseUpdate)
- }
- return cmps, nil
-}
-
-func (txn *txnLeasing) guard(ops []v3.Op) ([]v3.Cmp, error) {
- cmps := txn.guardKeys(ops)
- rangeCmps, err := txn.guardRanges(ops)
- return append(cmps, rangeCmps...), err
-}
-
-func (txn *txnLeasing) commitToCache(txnResp *v3pb.TxnResponse, userTxn v3.Op) {
- ops := gatherResponseOps(txnResp.Responses, []v3.Op{userTxn})
- txn.lkv.leases.mu.Lock()
- for _, op := range ops {
- key := string(op.KeyBytes())
- if op.IsDelete() && len(op.RangeBytes()) > 0 {
- end := string(op.RangeBytes())
- for k := range txn.lkv.leases.entries {
- if inRange(k, key, end) {
- txn.lkv.leases.delete(k, txnResp.Header)
- }
- }
- } else if op.IsDelete() {
- txn.lkv.leases.delete(key, txnResp.Header)
- }
- if op.IsPut() {
- txn.lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), txnResp.Header)
- }
- }
- txn.lkv.leases.mu.Unlock()
-}
-
-func (txn *txnLeasing) revokeFallback(fbResps []*v3pb.ResponseOp) error {
- for _, resp := range fbResps {
- _, err := txn.lkv.revokeLeaseKvs(txn.ctx, resp.GetResponseRange().Kvs)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (txn *txnLeasing) serverTxn() (*v3.TxnResponse, error) {
- if err := txn.lkv.waitSession(txn.ctx); err != nil {
- return nil, err
- }
-
- userOps := gatherOps(append(txn.opst, txn.opse...))
- userTxn := v3.OpTxn(txn.cs, txn.opst, txn.opse)
- fbOps := txn.fallback(userOps)
-
- defer closeAll(txn.lkv.leases.LockWriteOps(userOps))
- for {
- cmps, err := txn.guard(userOps)
- if err != nil {
- return nil, err
- }
- resp, err := txn.lkv.kv.Txn(txn.ctx).If(cmps...).Then(userTxn).Else(fbOps...).Commit()
- if err != nil {
- for _, cmp := range cmps {
- txn.lkv.leases.Evict(strings.TrimPrefix(string(cmp.Key), txn.lkv.pfx))
- }
- return nil, err
- }
- if resp.Succeeded {
- txn.commitToCache((*v3pb.TxnResponse)(resp), userTxn)
- userResp := resp.Responses[0].GetResponseTxn()
- userResp.Header = resp.Header
- return (*v3.TxnResponse)(userResp), nil
- }
- if err := txn.revokeFallback(resp.Responses); err != nil {
- return nil, err
- }
- }
-}
diff --git a/client/v3/leasing/util.go b/client/v3/leasing/util.go
deleted file mode 100644
index b6a520f03f0..00000000000
--- a/client/v3/leasing/util.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasing
-
-import (
- "bytes"
-
- v3pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- v3 "go.etcd.io/etcd/client/v3"
-)
-
-func compareInt64(a, b int64) int {
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
-}
-
-func evalCmp(resp *v3.GetResponse, tcmp v3.Cmp) bool {
- var result int
- if len(resp.Kvs) != 0 {
- kv := resp.Kvs[0]
- switch tcmp.Target {
- case v3pb.Compare_VALUE:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Value); tv != nil {
- result = bytes.Compare(kv.Value, tv.Value)
- }
- case v3pb.Compare_CREATE:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_CreateRevision); tv != nil {
- result = compareInt64(kv.CreateRevision, tv.CreateRevision)
- }
- case v3pb.Compare_MOD:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_ModRevision); tv != nil {
- result = compareInt64(kv.ModRevision, tv.ModRevision)
- }
- case v3pb.Compare_VERSION:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Version); tv != nil {
- result = compareInt64(kv.Version, tv.Version)
- }
- }
- }
- switch tcmp.Result {
- case v3pb.Compare_EQUAL:
- return result == 0
- case v3pb.Compare_NOT_EQUAL:
- return result != 0
- case v3pb.Compare_GREATER:
- return result > 0
- case v3pb.Compare_LESS:
- return result < 0
- }
- return true
-}
-
-func gatherOps(ops []v3.Op) (ret []v3.Op) {
- for _, op := range ops {
- if !op.IsTxn() {
- ret = append(ret, op)
- continue
- }
- _, thenOps, elseOps := op.Txn()
- ret = append(ret, gatherOps(append(thenOps, elseOps...))...)
- }
- return ret
-}
-
-func gatherResponseOps(resp []*v3pb.ResponseOp, ops []v3.Op) (ret []v3.Op) {
- for i, op := range ops {
- if !op.IsTxn() {
- ret = append(ret, op)
- continue
- }
- _, thenOps, elseOps := op.Txn()
- if txnResp := resp[i].GetResponseTxn(); txnResp.Succeeded {
- ret = append(ret, gatherResponseOps(txnResp.Responses, thenOps)...)
- } else {
- ret = append(ret, gatherResponseOps(txnResp.Responses, elseOps)...)
- }
- }
- return ret
-}
-
-func copyHeader(hdr *v3pb.ResponseHeader) *v3pb.ResponseHeader {
- h := *hdr
- return &h
-}
-
-func closeAll(chs []chan<- struct{}) {
- for _, ch := range chs {
- close(ch)
- }
-}
diff --git a/client/v3/logger.go b/client/v3/logger.go
deleted file mode 100644
index 300363cd25b..00000000000
--- a/client/v3/logger.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "log"
- "os"
-
- "go.uber.org/zap/zapcore"
- "go.uber.org/zap/zapgrpc"
- "google.golang.org/grpc/grpclog"
-
- "go.etcd.io/etcd/client/pkg/v3/logutil"
-)
-
-func init() {
-	// We override the grpc logger only when the environment variable is set,
-	// so that we do not interfere with the user's code or other libraries by default.
- if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
- lg, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
- if err != nil {
- panic(err)
- }
- lg = lg.Named("etcd-client")
- grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
- }
-}
-
-// SetLogger sets grpc logger.
-//
-// Deprecated: use grpclog.SetLoggerV2 directly or grpc_zap.ReplaceGrpcLoggerV2.
-func SetLogger(l grpclog.LoggerV2) {
- grpclog.SetLoggerV2(l)
-}
-
-// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level.
-func etcdClientDebugLevel() zapcore.Level {
- envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
- if envLevel == "" || envLevel == "true" {
- return zapcore.InfoLevel
- }
- var l zapcore.Level
- if err := l.Set(envLevel); err != nil {
- log.Print("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
- return zapcore.InfoLevel
- }
- return l
-}
diff --git a/client/v3/main_test.go b/client/v3/main_test.go
deleted file mode 100644
index 4007d77bc5a..00000000000
--- a/client/v3/main_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3_test
-
-import (
- "testing"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-const (
- dialTimeout = 5 * time.Second
- requestTimeout = 10 * time.Second
-)
-
-func exampleEndpoints() []string { return nil }
-
-func forUnitTestsRunInMockedContext(mocking func(), example func()) {
- mocking()
- // TODO: Call 'example' when mocking() provides realistic mocking of transport.
-
- // The real testing logic of examples gets executed
- // as part of ./tests/integration/clientv3/integration/...
-}
-
-func TestMain(m *testing.M) {
- testutil.MustTestMainWithLeakDetection(m)
-}
diff --git a/client/v3/maintenance.go b/client/v3/maintenance.go
deleted file mode 100644
index 082b77f1a5a..00000000000
--- a/client/v3/maintenance.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-)
-
-type (
- DefragmentResponse pb.DefragmentResponse
- AlarmResponse pb.AlarmResponse
- AlarmMember pb.AlarmMember
- StatusResponse pb.StatusResponse
- HashKVResponse pb.HashKVResponse
- MoveLeaderResponse pb.MoveLeaderResponse
- DowngradeResponse pb.DowngradeResponse
-
- DowngradeAction pb.DowngradeRequest_DowngradeAction
-)
-
-const (
- DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE)
- DowngradeEnable = DowngradeAction(pb.DowngradeRequest_ENABLE)
- DowngradeCancel = DowngradeAction(pb.DowngradeRequest_CANCEL)
-)
-
-type Maintenance interface {
- // AlarmList gets all active alarms.
- AlarmList(ctx context.Context) (*AlarmResponse, error)
-
- // AlarmDisarm disarms a given alarm.
- AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
-
- // Defragment releases wasted space from internal fragmentation on a given etcd member.
-	// Defragment is only needed after deleting a large number of keys, when the user wants to
-	// reclaim the resources.
-	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
-	// at the same time.
-	// To defragment multiple members in the cluster, users need to call Defragment multiple
-	// times with different endpoints.
- Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
-
- // Status gets the status of the endpoint.
- Status(ctx context.Context, endpoint string) (*StatusResponse, error)
-
- // HashKV returns a hash of the KV state at the time of the RPC.
- // If revision is zero, the hash is computed on all keys. If the revision
- // is non-zero, the hash is computed on all keys at or below the given revision.
- HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
-
-	// SnapshotWithVersion returns a reader for a point-in-time snapshot and the version of etcd that created it.
-	// If the context "ctx" is canceled or timed out, reading from the returned
-	// "io.ReadCloser" will error out (e.g. context.Canceled, context.DeadlineExceeded).
- SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error)
-
- // Snapshot provides a reader for a point-in-time snapshot of etcd.
-	// If the context "ctx" is canceled or timed out, reading from the returned
-	// "io.ReadCloser" will error out (e.g. context.Canceled, context.DeadlineExceeded).
- // Deprecated: use SnapshotWithVersion instead.
- Snapshot(ctx context.Context) (io.ReadCloser, error)
-
- // MoveLeader requests current leader to transfer its leadership to the transferee.
- // Request must be made to the leader.
- MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
-
-	// Downgrade requests a downgrade, verifies its feasibility, or cancels an
-	// in-progress downgrade of the cluster version.
-	// Supported since etcd 3.5.
- Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error)
-}
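
A sketch of the per-endpoint workflow implied by Status and Defragment above, assuming an existing client cli and iterating its configured endpoints; members are defragmented one at a time, never concurrently, as the interface comment advises.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// defragmentAll reports the status of each endpoint and defragments the
// members sequentially.
func defragmentAll(ctx context.Context, cli *clientv3.Client) error {
	for _, ep := range cli.Endpoints() {
		st, err := cli.Status(ctx, ep)
		if err != nil {
			return err
		}
		fmt.Printf("%s: dbSize=%d leader=%x\n", ep, st.DbSize, st.Leader)
		if _, err := cli.Defragment(ctx, ep); err != nil {
			return err
		}
	}
	return nil
}
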
-
-// SnapshotResponse is the aggregated response from the snapshot stream.
-// The consumer is responsible for closing the stream by calling .Snapshot.Close().
-type SnapshotResponse struct {
-	// Header is the first header in the snapshot stream; it has the current key-value store information
-	// and indicates the point in time of the snapshot.
- Header *pb.ResponseHeader
-	// Snapshot exposes an io.ReadCloser for the data stored in the Blob field of the snapshot stream.
- Snapshot io.ReadCloser
-	// Version is the local version of the server that created the snapshot.
-	// In a cluster whose members run different binary versions, each member can return a different result.
-	// It informs which etcd server version should be used when restoring the snapshot.
-	// Supported on etcd >= v3.6.
- Version string
-}
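
A sketch of consuming a SnapshotResponse: stream the snapshot to a local file and return the reported version (empty on etcd older than v3.6). The path argument is a placeholder and cli is an existing client.

package example

import (
	"context"
	"fmt"
	"io"
	"os"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// saveSnapshot streams a point-in-time snapshot to path and returns the
// server version that produced it.
func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) (string, error) {
	resp, err := cli.SnapshotWithVersion(ctx)
	if err != nil {
		return "", err
	}
	defer resp.Snapshot.Close() // the consumer owns the stream

	f, err := os.Create(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	if _, err := io.Copy(f, resp.Snapshot); err != nil {
		return "", err
	}
	fmt.Printf("snapshot at revision %d saved to %s\n", resp.Header.Revision, path)
	return resp.Version, nil
}
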
-
-type maintenance struct {
- lg *zap.Logger
- dial func(endpoint string) (pb.MaintenanceClient, func(), error)
- remote pb.MaintenanceClient
- callOpts []grpc.CallOption
-}
-
-func NewMaintenance(c *Client) Maintenance {
- api := &maintenance{
- lg: c.lg,
- dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
- conn, err := c.Dial(endpoint)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
- }
-
- cancel := func() { conn.Close() }
- return RetryMaintenanceClient(c, conn), cancel, nil
- },
- remote: RetryMaintenanceClient(c, c.conn),
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
- api := &maintenance{
- lg: c.lg,
- dial: func(string) (pb.MaintenanceClient, func(), error) {
- return remote, func() {}, nil
- },
- remote: remote,
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_GET,
- MemberID: 0, // all
- Alarm: pb.AlarmType_NONE, // all
- }
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_DEACTIVATE,
- MemberID: am.MemberID,
- Alarm: am.Alarm,
- }
-
- if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
- ar, err := m.AlarmList(ctx)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- ret := AlarmResponse{}
- for _, am := range ar.Alarms {
- dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
- if derr != nil {
- return nil, toErr(ctx, derr)
- }
- ret.Alarms = append(ret.Alarms, dresp.Alarms...)
- }
- return &ret, nil
- }
-
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
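
From the caller's side, the zero-value branch above means a single call can disarm every active alarm; a hedged sketch, assuming an existing client cli.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// disarmAllAlarms lists the active alarms and then clears all of them by
// passing a zero-value AlarmMember (MemberID 0, AlarmType NONE).
func disarmAllAlarms(ctx context.Context, cli *clientv3.Client) error {
	list, err := cli.AlarmList(ctx)
	if err != nil {
		return err
	}
	for _, a := range list.Alarms {
		fmt.Printf("active alarm: member=%x type=%v\n", a.MemberID, a.Alarm)
	}
	if _, err := cli.AlarmDisarm(ctx, &clientv3.AlarmMember{}); err != nil {
		return err
	}
	return nil
}
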
-
-func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*DefragmentResponse)(resp), nil
-}
-
-func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*StatusResponse)(resp), nil
-}
-
-func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
-
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*HashKVResponse)(resp), nil
-}
-
-func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) {
- ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- m.lg.Info("opened snapshot stream; downloading")
- pr, pw := io.Pipe()
-
- resp, err := ss.Recv()
- if err != nil {
- m.logAndCloseWithError(err, pw)
- return nil, err
- }
- go func() {
- // Saving response is blocking
- err = m.save(resp, pw)
- if err != nil {
- m.logAndCloseWithError(err, pw)
- return
- }
- for {
- resp, err := ss.Recv()
- if err != nil {
- m.logAndCloseWithError(err, pw)
- return
- }
- err = m.save(resp, pw)
- if err != nil {
- m.logAndCloseWithError(err, pw)
- return
- }
- }
- }()
-
- return &SnapshotResponse{
- Header: resp.GetHeader(),
- Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr},
- Version: resp.GetVersion(),
- }, err
-}
-
-func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
- ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- m.lg.Info("opened snapshot stream; downloading")
- pr, pw := io.Pipe()
-
- go func() {
- for {
- resp, err := ss.Recv()
- if err != nil {
- m.logAndCloseWithError(err, pw)
- return
- }
- err = m.save(resp, pw)
- if err != nil {
- m.logAndCloseWithError(err, pw)
- return
- }
- }
- }()
- return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, err
-}
-
-func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) {
- switch err {
- case io.EOF:
- m.lg.Info("completed snapshot read; closing")
- default:
- m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
- }
- pw.CloseWithError(err)
-}
-
-func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error {
-	// Can "resp == nil && err == nil" happen before we receive the snapshot SHA digest?
-	// No: the server sends EOF with an empty response after it sends the SHA digest at the end.
-
- if _, werr := pw.Write(resp.Blob); werr != nil {
- return werr
- }
- return nil
-}
-
-type snapshotReadCloser struct {
- ctx context.Context
- io.ReadCloser
-}
-
-func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
- n, err = rc.ReadCloser.Read(p)
- return n, toErr(rc.ctx, err)
-}
-
-func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
- resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
- return (*MoveLeaderResponse)(resp), toErr(ctx, err)
-}
-
-func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) {
- var actionType pb.DowngradeRequest_DowngradeAction
- switch action {
- case DowngradeValidate:
- actionType = pb.DowngradeRequest_VALIDATE
- case DowngradeEnable:
- actionType = pb.DowngradeRequest_ENABLE
- case DowngradeCancel:
- actionType = pb.DowngradeRequest_CANCEL
- default:
- return nil, errors.New("etcdclient: unknown downgrade action")
- }
- resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...)
- return (*DowngradeResponse)(resp), toErr(ctx, err)
-}
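
A hedged sketch of the validate-then-enable sequence suggested by the action constants; the target version string is a placeholder and cli is an existing client.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// startDowngrade first validates that the cluster can be downgraded to the
// target version and, only if validation succeeds, enables the downgrade.
func startDowngrade(ctx context.Context, cli *clientv3.Client, target string) error {
	if _, err := cli.Downgrade(ctx, clientv3.DowngradeValidate, target); err != nil {
		return fmt.Errorf("downgrade to %s is not feasible: %w", target, err)
	}
	resp, err := cli.Downgrade(ctx, clientv3.DowngradeEnable, target)
	if err != nil {
		return err
	}
	fmt.Printf("downgrade enabled, cluster version: %s\n", resp.Version)
	return nil
}
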
diff --git a/client/v3/mirror/syncer.go b/client/v3/mirror/syncer.go
deleted file mode 100644
index 3e83c989a87..00000000000
--- a/client/v3/mirror/syncer.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package mirror implements etcd mirroring operations.
-package mirror
-
-import (
- "context"
-
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-const (
- batchLimit = 1000
-)
-
-// Syncer syncs with the key-value state of an etcd cluster.
-type Syncer interface {
-	// SyncBase syncs the base state of the key-value store.
-	// The key-value state is sent through the returned chan.
- SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error)
- // SyncUpdates syncs the updates of the key-value state.
- // The update events are sent through the returned chan.
- SyncUpdates(ctx context.Context) clientv3.WatchChan
-}
-
-// NewSyncer creates a Syncer.
-func NewSyncer(c *clientv3.Client, prefix string, rev int64) Syncer {
- return &syncer{c: c, prefix: prefix, rev: rev}
-}
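
A sketch of driving the Syncer: drain SyncBase completely, then follow SyncUpdates from the revision the base sync pinned. The prefix is a placeholder and cli is an existing client.

package example

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/mirror"
)

// mirrorPrefix copies the current state under prefix and then streams updates.
func mirrorPrefix(ctx context.Context, cli *clientv3.Client, prefix string) error {
	s := mirror.NewSyncer(cli, prefix, 0)

	respc, errc := s.SyncBase(ctx)
	for resp := range respc {
		for _, kv := range resp.Kvs {
			fmt.Printf("base: %s = %s\n", kv.Key, kv.Value)
		}
	}
	if err := <-errc; err != nil {
		return err
	}

	// SyncUpdates must be called only after SyncBase has finished.
	for wresp := range s.SyncUpdates(ctx) {
		for _, ev := range wresp.Events {
			fmt.Printf("update: %s %s = %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	return ctx.Err()
}
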
-
-type syncer struct {
- c *clientv3.Client
- rev int64
- prefix string
-}
-
-func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) {
- respchan := make(chan clientv3.GetResponse, 1024)
- errchan := make(chan error, 1)
-
- // if rev is not specified, we will choose the most recent revision.
- if s.rev == 0 {
-		// If len(s.prefix) == 0, we check an arbitrary key ("foo") to fetch the most
-		// recent revision; otherwise we use the provided prefix.
- checkPath := "foo"
- if len(s.prefix) != 0 {
- checkPath = s.prefix
- }
- resp, err := s.c.Get(ctx, checkPath)
- if err != nil {
- errchan <- err
- close(respchan)
- close(errchan)
- return respchan, errchan
- }
- s.rev = resp.Header.Revision
- }
-
- go func() {
- defer close(respchan)
- defer close(errchan)
-
- var key string
-
- opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev),
- clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)}
-
- if len(s.prefix) == 0 {
- // If len(s.prefix) == 0, we will sync the entire key-value space.
- // We then range from the smallest key (0x00) to the end.
- opts = append(opts, clientv3.WithFromKey())
- key = "\x00"
- } else {
-			// If len(s.prefix) != 0, we will sync the key-value space with the given prefix.
-			// We then range from the prefix to the next prefix if it exists, or to the
-			// end of the keyspace if the next prefix does not exist.
- opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(s.prefix)))
- key = s.prefix
- }
-
- for {
- resp, err := s.c.Get(ctx, key, opts...)
- if err != nil {
- errchan <- err
- return
- }
-
- respchan <- *resp
-
- if !resp.More {
- return
- }
- // move to next key
- key = string(append(resp.Kvs[len(resp.Kvs)-1].Key, 0))
- }
- }()
-
- return respchan, errchan
-}
-
-func (s *syncer) SyncUpdates(ctx context.Context) clientv3.WatchChan {
- if s.rev == 0 {
- panic("unexpected revision = 0. Calling SyncUpdates before SyncBase finishes?")
- }
- return s.c.Watch(ctx, s.prefix, clientv3.WithPrefix(), clientv3.WithRev(s.rev+1))
-}
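A small, assumption-laden sketch of driving the Syncer above: it snapshots everything under a prefix in batches and then follows live updates; the endpoint and prefix are made up.

package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/mirror"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	s := mirror.NewSyncer(cli, "jobs/", 0) // rev=0: start from the current revision

	// Phase 1: stream the existing state in batches of up to batchLimit keys.
	rc, errc := s.SyncBase(ctx)
	for resp := range rc {
		for _, kv := range resp.Kvs {
			fmt.Printf("base %s=%s\n", kv.Key, kv.Value)
		}
	}
	if err := <-errc; err != nil {
		log.Fatalf("sync base: %v", err)
	}

	// Phase 2: follow updates from the revision captured by SyncBase.
	for wr := range s.SyncUpdates(ctx) {
		for _, ev := range wr.Events {
			fmt.Printf("update %s %s=%s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}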
diff --git a/client/v3/mock/mockserver/doc.go b/client/v3/mock/mockserver/doc.go
deleted file mode 100644
index 030b3b2ffb7..00000000000
--- a/client/v3/mock/mockserver/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package mockserver provides mock implementations for etcdserver's server interface.
-package mockserver
diff --git a/client/v3/namespace/doc.go b/client/v3/namespace/doc.go
deleted file mode 100644
index 689e0e0bb38..00000000000
--- a/client/v3/namespace/doc.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package namespace is a clientv3 wrapper that translates all keys to begin
-// with a given prefix.
-//
-// First, create a client:
-//
-// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
-// if err != nil {
-// // handle error!
-// }
-//
-// Next, override the client interfaces:
-//
-// unprefixedKV := cli.KV
-// cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
-// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
-// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
-//
-// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/":
-//
-// cli.Put(context.TODO(), "abc", "123")
-// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc")
-// fmt.Printf("%s\n", resp.Kvs[0].Value)
-// // Output: 123
-// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456")
-// resp, _ = cli.Get(context.TODO(), "abc")
-// fmt.Printf("%s\n", resp.Kvs[0].Value)
-// // Output: 456
-package namespace
diff --git a/client/v3/namespace/kv.go b/client/v3/namespace/kv.go
deleted file mode 100644
index aa338d5356d..00000000000
--- a/client/v3/namespace/kv.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-import (
- "context"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-type kvPrefix struct {
- clientv3.KV
- pfx string
-}
-
-// NewKV wraps a KV instance so that all requests
-// are prefixed with a given string.
-func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
- return &kvPrefix{kv, prefix}
-}
-
-func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
- if len(key) == 0 {
- return nil, rpctypes.ErrEmptyKey
- }
- op := kv.prefixOp(clientv3.OpPut(key, val, opts...))
- r, err := kv.KV.Do(ctx, op)
- if err != nil {
- return nil, err
- }
- put := r.Put()
- kv.unprefixPutResponse(put)
- return put, nil
-}
-
-func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
- if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) {
- return nil, rpctypes.ErrEmptyKey
- }
- getOp := clientv3.OpGet(key, opts...)
- if !getOp.IsSortOptionValid() {
- return nil, rpctypes.ErrInvalidSortOption
- }
- r, err := kv.KV.Do(ctx, kv.prefixOp(getOp))
- if err != nil {
- return nil, err
- }
- get := r.Get()
- kv.unprefixGetResponse(get)
- return get, nil
-}
-
-func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
- if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) {
- return nil, rpctypes.ErrEmptyKey
- }
- r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...)))
- if err != nil {
- return nil, err
- }
- del := r.Del()
- kv.unprefixDeleteResponse(del)
- return del, nil
-}
-
-func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
- if len(op.KeyBytes()) == 0 && !op.IsTxn() {
- return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
- }
- r, err := kv.KV.Do(ctx, kv.prefixOp(op))
- if err != nil {
- return r, err
- }
- switch {
- case r.Get() != nil:
- kv.unprefixGetResponse(r.Get())
- case r.Put() != nil:
- kv.unprefixPutResponse(r.Put())
- case r.Del() != nil:
- kv.unprefixDeleteResponse(r.Del())
- case r.Txn() != nil:
- kv.unprefixTxnResponse(r.Txn())
- }
- return r, nil
-}
-
-type txnPrefix struct {
- clientv3.Txn
- kv *kvPrefix
-}
-
-func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
- return &txnPrefix{kv.KV.Txn(ctx), kv}
-}
-
-func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
- txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...)
- return txn
-}
-
-func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
- txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...)
- return txn
-}
-
-func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
- txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...)
- return txn
-}
-
-func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
- resp, err := txn.Txn.Commit()
- if err != nil {
- return nil, err
- }
- txn.kv.unprefixTxnResponse(resp)
- return resp, nil
-}
-
-func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
- if !op.IsTxn() {
- begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
- op.WithKeyBytes(begin)
- op.WithRangeBytes(end)
- return op
- }
- cmps, thenOps, elseOps := op.Txn()
- return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps))
-}
-
-func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
- for i := range resp.Kvs {
- resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):]
- }
-}
-
-func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) {
- if resp.PrevKv != nil {
- resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):]
- }
-}
-
-func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) {
- for i := range resp.PrevKvs {
- resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):]
- }
-}
-
-func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
- for _, r := range resp.Responses {
- switch tv := r.Response.(type) {
- case *pb.ResponseOp_ResponseRange:
- if tv.ResponseRange != nil {
- kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange))
- }
- case *pb.ResponseOp_ResponsePut:
- if tv.ResponsePut != nil {
- kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut))
- }
- case *pb.ResponseOp_ResponseDeleteRange:
- if tv.ResponseDeleteRange != nil {
- kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
- }
- case *pb.ResponseOp_ResponseTxn:
- if tv.ResponseTxn != nil {
- kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn))
- }
- default:
- }
- }
-}
-
-func (kv *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
- return prefixInterval(kv.pfx, key, end)
-}
-
-func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp {
- newCmps := make([]clientv3.Cmp, len(cs))
- for i := range cs {
- newCmps[i] = cs[i]
- pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), cs[i].RangeEnd)
- newCmps[i].WithKeyBytes(pfxKey)
- if len(cs[i].RangeEnd) != 0 {
- newCmps[i].RangeEnd = endKey
- }
- }
- return newCmps
-}
-
-func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op {
- newOps := make([]clientv3.Op, len(ops))
- for i := range ops {
- newOps[i] = kv.prefixOp(ops[i])
- }
- return newOps
-}
diff --git a/client/v3/namespace/lease.go b/client/v3/namespace/lease.go
deleted file mode 100644
index b80b530467c..00000000000
--- a/client/v3/namespace/lease.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-import (
- "bytes"
- "context"
-
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-type leasePrefix struct {
- clientv3.Lease
- pfx []byte
-}
-
-// NewLease wraps a Lease interface to filter for only keys with a prefix
-// and remove that prefix when fetching attached keys through TimeToLive.
-func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
- return &leasePrefix{l, []byte(prefix)}
-}
-
-func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
- resp, err := l.Lease.TimeToLive(ctx, id, opts...)
- if err != nil {
- return nil, err
- }
- if len(resp.Keys) > 0 {
- var outKeys [][]byte
- for i := range resp.Keys {
- if len(resp.Keys[i]) < len(l.pfx) {
- // too short
- continue
- }
- if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
- // doesn't match prefix
- continue
- }
- // strip prefix
- outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
- }
- resp.Keys = outKeys
- }
- return resp, nil
-}
diff --git a/client/v3/namespace/util_test.go b/client/v3/namespace/util_test.go
deleted file mode 100644
index 9ba472b0a28..00000000000
--- a/client/v3/namespace/util_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-import (
- "bytes"
- "testing"
-)
-
-func TestPrefixInterval(t *testing.T) {
- tests := []struct {
- pfx string
- key []byte
- end []byte
-
- wKey []byte
- wEnd []byte
- }{
- // single key
- {
- pfx: "pfx/",
- key: []byte("a"),
-
- wKey: []byte("pfx/a"),
- },
- // range
- {
- pfx: "pfx/",
- key: []byte("abc"),
- end: []byte("def"),
-
- wKey: []byte("pfx/abc"),
- wEnd: []byte("pfx/def"),
- },
- // one-sided range
- {
- pfx: "pfx/",
- key: []byte("abc"),
- end: []byte{0},
-
- wKey: []byte("pfx/abc"),
- wEnd: []byte("pfx0"),
- },
- // one-sided range, end of keyspace
- {
- pfx: "\xff\xff",
- key: []byte("abc"),
- end: []byte{0},
-
- wKey: []byte("\xff\xffabc"),
- wEnd: []byte{0},
- },
- }
- for i, tt := range tests {
- pfxKey, pfxEnd := prefixInterval(tt.pfx, tt.key, tt.end)
- if !bytes.Equal(pfxKey, tt.wKey) {
- t.Errorf("#%d: expected key=%q, got key=%q", i, tt.wKey, pfxKey)
- }
- if !bytes.Equal(pfxEnd, tt.wEnd) {
- t.Errorf("#%d: expected end=%q, got end=%q", i, tt.wEnd, pfxEnd)
- }
- }
-}
diff --git a/client/v3/namespace/watch.go b/client/v3/namespace/watch.go
deleted file mode 100644
index edf1af87b58..00000000000
--- a/client/v3/namespace/watch.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-import (
- "context"
- "sync"
-
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-type watcherPrefix struct {
- clientv3.Watcher
- pfx string
-
- wg sync.WaitGroup
- stopc chan struct{}
- stopOnce sync.Once
-}
-
-// NewWatcher wraps a Watcher instance so that all Watch requests
-// are prefixed with a given string and all Watch responses have
-// the prefix removed.
-func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
- return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
-}
-
-func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
- // since OpOption is opaque, determine range for prefixing through an OpGet
- op := clientv3.OpGet(key, opts...)
- end := op.RangeBytes()
- pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end)
- if pfxEnd != nil {
- opts = append(opts, clientv3.WithRange(string(pfxEnd)))
- }
-
- wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...)
-
- // translate watch events from prefixed to unprefixed
- pfxWch := make(chan clientv3.WatchResponse)
- w.wg.Add(1)
- go func() {
- defer func() {
- close(pfxWch)
- w.wg.Done()
- }()
- for wr := range wch {
- for i := range wr.Events {
- wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):]
- if wr.Events[i].PrevKv != nil {
- wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key
- }
- }
- select {
- case pfxWch <- wr:
- case <-ctx.Done():
- return
- case <-w.stopc:
- return
- }
- }
- }()
- return pfxWch
-}
-
-func (w *watcherPrefix) Close() error {
- err := w.Watcher.Close()
- w.stopOnce.Do(func() { close(w.stopc) })
- w.wg.Wait()
- return err
-}
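A brief sketch (prefix and keys are illustrative) of how the wrapped watcher pairs with the prefixed KV: writes go in with the prefix applied, events come back with it stripped.

package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/namespace"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Confine this client to "team-a/".
	cli.KV = namespace.NewKV(cli.KV, "team-a/")
	cli.Watcher = namespace.NewWatcher(cli.Watcher, "team-a/")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wch := cli.Watch(ctx, "config", clientv3.WithPrefix())

	// This Put is stored as "team-a/config/max-conns" on the server...
	if _, err := cli.Put(ctx, "config/max-conns", "100"); err != nil {
		log.Fatal(err)
	}

	// ...but the watch event reports the unprefixed key "config/max-conns".
	for wr := range wch {
		for _, ev := range wr.Events {
			fmt.Printf("%s %s=%s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
		cancel() // one round is enough for this sketch
	}
}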
diff --git a/client/v3/naming/doc.go b/client/v3/naming/doc.go
deleted file mode 100644
index f2050a6aa6c..00000000000
--- a/client/v3/naming/doc.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package naming provides:
-// - subpackage endpoints: an abstraction layer to store and read endpoints
-// information from etcd.
-// - subpackage resolver: an etcd-backed gRPC resolver for discovering gRPC
-// services based on the endpoints configuration
-//
-// To use, first import the packages:
-//
-// import (
-// "go.etcd.io/etcd/client/v3"
-// "go.etcd.io/etcd/client/v3/naming/endpoints"
-// "go.etcd.io/etcd/client/v3/naming/resolver"
-// "google.golang.org/grpc"
-// )
-//
-// First, register new endpoint addresses for a service:
-//
-// func etcdAdd(c *clientv3.Client, service, addr string) error {
-// em := endpoints.NewManager(c, service)
-// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr});
-// }
-//
-// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
-//
-// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
-// etcdResolver, err := resolver.NewBuilder(c);
-//	if err != nil { return nil, err }
-// return grpc.Dial("etcd:///" + service, grpc.WithResolvers(etcdResolver))
-// }
-//
-// Optionally, force delete an endpoint:
-//
-//	func etcdDelete(c *clientv3.Client, service, addr string) error {
-// em := endpoints.NewManager(c, service)
-// return em.DeleteEndpoint(c.Ctx(), service+"/"+addr)
-// }
-//
-// Or register an expiring endpoint with a lease:
-//
-// func etcdAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
-// em := endpoints.NewManager(c, service)
-// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr}, clientv3.WithLease(lid));
-// }
-package naming
diff --git a/client/v3/naming/endpoints/internal/update.go b/client/v3/naming/endpoints/internal/update.go
deleted file mode 100644
index d42f49062a4..00000000000
--- a/client/v3/naming/endpoints/internal/update.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-// Operation describes the action performed on an endpoint (addition vs deletion).
-// Must stay JSON-format compatible with:
-// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Operation
-type Operation uint8
-
-const (
- // Add indicates a new address is added.
- Add Operation = iota
- // Delete indicates an existing address is deleted.
- Delete
-)
-
-// Update defines a persistent (JSON marshalled) format representing
-// endpoint within the etcd storage.
-//
-// As the format can be persisted by one version of the etcd client library and
-// read by another, the format must be kept backward compatible and, in
-// particular, must be a superset of the grpc (<=1.29.1) naming.Update structure:
-// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Update
-//
-// Please document since which version of etcd-client a given property is supported.
-// Please keep the naming consistent with e.g. https://pkg.go.dev/google.golang.org/grpc/resolver#Address.
-//
-// Notice that it is not valid to have both an empty string Addr and nil Metadata in an Update.
-type Update struct {
- // Op indicates the operation of the update.
- // Since etcd 3.1.
- Op Operation
- // Addr is the updated address. It is empty string if there is no address update.
- // Since etcd 3.1.
- Addr string
- // Metadata is the updated metadata. It is nil if there is no metadata update.
- // Metadata is not required for a custom naming implementation.
- // Since etcd 3.1.
- Metadata interface{}
-}
diff --git a/client/v3/naming/resolver/resolver.go b/client/v3/naming/resolver/resolver.go
deleted file mode 100644
index 7b9f61d2e08..00000000000
--- a/client/v3/naming/resolver/resolver.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package resolver
-
-import (
- "context"
- "strings"
- "sync"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/naming/endpoints"
-
- "google.golang.org/grpc/codes"
- gresolver "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/status"
-)
-
-type builder struct {
- c *clientv3.Client
-}
-
-func (b builder) Build(target gresolver.Target, cc gresolver.ClientConn, opts gresolver.BuildOptions) (gresolver.Resolver, error) {
- // Refer to https://github.com/grpc/grpc-go/blob/16d3df80f029f57cff5458f1d6da6aedbc23545d/clientconn.go#L1587-L1611
- endpoint := target.URL.Path
- if endpoint == "" {
- endpoint = target.URL.Opaque
- }
- endpoint = strings.TrimPrefix(endpoint, "/")
- r := &resolver{
- c: b.c,
- target: endpoint,
- cc: cc,
- }
- r.ctx, r.cancel = context.WithCancel(context.Background())
-
- em, err := endpoints.NewManager(r.c, r.target)
- if err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, "resolver: failed to create endpoint manager: %s", err)
- }
- r.wch, err = em.NewWatchChannel(r.ctx)
- if err != nil {
-		return nil, status.Errorf(codes.Internal, "resolver: failed to create watch channel: %s", err)
- }
-
- r.wg.Add(1)
- go r.watch()
- return r, nil
-}
-
-func (b builder) Scheme() string {
- return "etcd"
-}
-
-// NewBuilder creates a resolver builder.
-func NewBuilder(client *clientv3.Client) (gresolver.Builder, error) {
- return builder{c: client}, nil
-}
-
-type resolver struct {
- c *clientv3.Client
- target string
- cc gresolver.ClientConn
- wch endpoints.WatchChannel
- ctx context.Context
- cancel context.CancelFunc
- wg sync.WaitGroup
-}
-
-func (r *resolver) watch() {
- defer r.wg.Done()
-
- allUps := make(map[string]*endpoints.Update)
- for {
- select {
- case <-r.ctx.Done():
- return
- case ups, ok := <-r.wch:
- if !ok {
- return
- }
-
- for _, up := range ups {
- switch up.Op {
- case endpoints.Add:
- allUps[up.Key] = up
- case endpoints.Delete:
- delete(allUps, up.Key)
- }
- }
-
- addrs := convertToGRPCAddress(allUps)
- r.cc.UpdateState(gresolver.State{Addresses: addrs})
- }
- }
-}
-
-func convertToGRPCAddress(ups map[string]*endpoints.Update) []gresolver.Address {
- var addrs []gresolver.Address
- for _, up := range ups {
- addr := gresolver.Address{
- Addr: up.Endpoint.Addr,
- Metadata: up.Endpoint.Metadata,
- }
- addrs = append(addrs, addr)
- }
- return addrs
-}
-
-// ResolveNow is a no-op here.
-// It's just a hint; the resolver can ignore it if not necessary.
-func (r *resolver) ResolveNow(gresolver.ResolveNowOptions) {}
-
-func (r *resolver) Close() {
- r.cancel()
- r.wg.Wait()
-}
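A sketch of the intended wiring (service name and addresses are placeholders): register an endpoint under a service key, then let gRPC discover it through this resolver; real deployments would attach a lease to the registration so it expires with the process.

package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/naming/endpoints"
	"go.etcd.io/etcd/client/v3/naming/resolver"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	const service = "foo/bar-service"

	// Publish one address for the service.
	em, err := endpoints.NewManager(cli, service)
	if err != nil {
		log.Fatal(err)
	}
	if err := em.AddEndpoint(context.TODO(), service+"/10.0.0.5:8080",
		endpoints.Endpoint{Addr: "10.0.0.5:8080"}); err != nil {
		log.Fatal(err)
	}

	// Dial through the etcd resolver; the "etcd" scheme must match Scheme() above.
	b, err := resolver.NewBuilder(cli)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := grpc.Dial("etcd:///"+service,
		grpc.WithResolvers(b),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}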
diff --git a/client/v3/op_test.go b/client/v3/op_test.go
deleted file mode 100644
index f1890eafafc..00000000000
--- a/client/v3/op_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "reflect"
- "testing"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-)
-
-// TestOpWithSort tests that, when WithSort(ASCEND, KEY) and WithLimit are specified,
-// the RangeRequest ignores the SortOption to avoid unnecessarily fetching
-// the entire key-space.
-func TestOpWithSort(t *testing.T) {
- opReq := OpGet("foo", WithSort(SortByKey, SortAscend), WithLimit(10)).toRequestOp().Request
- q, ok := opReq.(*pb.RequestOp_RequestRange)
- if !ok {
- t.Fatalf("expected range request, got %v", reflect.TypeOf(opReq))
- }
- req := q.RequestRange
- wreq := &pb.RangeRequest{Key: []byte("foo"), SortOrder: pb.RangeRequest_NONE, Limit: 10}
- if !reflect.DeepEqual(req, wreq) {
- t.Fatalf("expected %+v, got %+v", wreq, req)
- }
-}
-
-func TestIsSortOptionValid(t *testing.T) {
- rangeReqs := []struct {
- sortOrder pb.RangeRequest_SortOrder
- sortTarget pb.RangeRequest_SortTarget
- expectedValid bool
- }{
- {
- sortOrder: pb.RangeRequest_ASCEND,
- sortTarget: pb.RangeRequest_CREATE,
- expectedValid: true,
- },
- {
- sortOrder: pb.RangeRequest_ASCEND,
- sortTarget: 100,
- expectedValid: false,
- },
- {
- sortOrder: 200,
- sortTarget: pb.RangeRequest_MOD,
- expectedValid: false,
- },
- }
-
- for _, req := range rangeReqs {
- getOp := Op{
- sort: &SortOption{
- Order: SortOrder(req.sortOrder),
- Target: SortTarget(req.sortTarget),
- },
- }
-
- actualRet := getOp.IsSortOptionValid()
- if actualRet != req.expectedValid {
- t.Errorf("expected sortOrder (%d) and sortTarget (%d) to be %t, but got %t",
- req.sortOrder, req.sortTarget, req.expectedValid, actualRet)
- }
- }
-}
-
-func TestIsOptsWithPrefix(t *testing.T) {
- optswithprefix := []OpOption{WithPrefix()}
- if !IsOptsWithPrefix(optswithprefix) {
- t.Errorf("IsOptsWithPrefix = false, expected true")
- }
-
- optswithfromkey := []OpOption{WithFromKey()}
- if IsOptsWithPrefix(optswithfromkey) {
- t.Errorf("IsOptsWithPrefix = true, expected false")
- }
-}
-
-func TestIsOptsWithFromKey(t *testing.T) {
- optswithfromkey := []OpOption{WithFromKey()}
- if !IsOptsWithFromKey(optswithfromkey) {
- t.Errorf("IsOptsWithFromKey = false, expected true")
- }
-
- optswithprefix := []OpOption{WithPrefix()}
- if IsOptsWithFromKey(optswithprefix) {
- t.Errorf("IsOptsWithFromKey = true, expected false")
- }
-}
diff --git a/client/v3/options.go b/client/v3/options.go
deleted file mode 100644
index cc10a03d76d..00000000000
--- a/client/v3/options.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "math"
- "time"
-
- "google.golang.org/grpc"
-)
-
-var (
-	// client-side handling of retrying request failures where data was not written to the wire or
-	// where the server indicates it did not process the data. The gRPC default is "WaitForReady(false)",
-	// but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to
-	// transient failures.
- defaultWaitForReady = grpc.WaitForReady(true)
-
- // client-side request send limit, gRPC default is math.MaxInt32
- // Make sure that "client-side send limit < server-side default send/recv limit"
- // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
- defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
-
- // client-side response receive limit, gRPC default is 4MB
- // Make sure that "client-side receive limit >= server-side default send/recv limit"
- // because range response can easily exceed request send limits
-	// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
- defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
-
- // client-side non-streaming retry limit, only applied to requests where server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
- // If set to 0, retry is disabled.
- defaultUnaryMaxRetries uint = 100
-
- // client-side streaming retry limit, only applied to requests where server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
- // If set to 0, retry is disabled.
- defaultStreamMaxRetries = ^uint(0) // max uint
-
- // client-side retry backoff wait between requests.
- defaultBackoffWaitBetween = 25 * time.Millisecond
-
- // client-side retry backoff default jitter fraction.
- defaultBackoffJitterFraction = 0.10
-)
-
-// defaultCallOpts defines a list of default "gRPC.CallOption".
-// Some options are exposed to "clientv3.Config".
-// Defaults will be overridden by the settings in "clientv3.Config".
-var defaultCallOpts = []grpc.CallOption{
- defaultWaitForReady,
- defaultMaxCallSendMsgSize,
- defaultMaxCallRecvMsgSize,
-}
-
-// MaxLeaseTTL is the maximum lease TTL value
-const MaxLeaseTTL = 9000000000
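These values are only fallbacks; a short sketch, under the assumption that non-zero Config fields replace the corresponding default call options, of overriding the message-size limits (the sizes below are arbitrary):

package main

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:          []string{"127.0.0.1:2379"},
		MaxCallSendMsgSize: 4 * 1024 * 1024,  // raise the 2 MiB send default
		MaxCallRecvMsgSize: 16 * 1024 * 1024, // bound the unlimited receive default
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}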
diff --git a/client/v3/ordering/doc.go b/client/v3/ordering/doc.go
deleted file mode 100644
index 03588248bd6..00000000000
--- a/client/v3/ordering/doc.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ordering is a clientv3 wrapper that caches response header revisions
-// to detect ordering violations from stale responses. Users may define a
-// policy on how to handle the ordering violation, but typically the client
-// should connect to another endpoint and reissue the request.
-//
-// The most common situation where an ordering violation happens is a client
-// reconnects to a partitioned member and issues a serializable read. Since the
-// partitioned member is likely behind the last member, it may return a Get
-// response based on a store revision older than the store revision used to
-// service a prior Get on the former endpoint.
-//
-// First, create a client:
-//
-// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
-// if err != nil {
-// // handle error!
-// }
-//
-// Next, override the client interface with the ordering wrapper:
-//
-// vf := func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
-// return fmt.Errorf("ordering: issued %+v, got %+v, expected rev=%v", op, resp, prevRev)
-// }
-// cli.KV = ordering.NewKV(cli.KV, vf)
-//
-// Now calls using 'cli' will reject order violations with an error.
-package ordering
diff --git a/client/v3/ordering/kv.go b/client/v3/ordering/kv.go
deleted file mode 100644
index 9075cbf9890..00000000000
--- a/client/v3/ordering/kv.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ordering
-
-import (
- "context"
- "sync"
-
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-// kvOrdering ensures that serialized requests do not return
-// Get responses with revisions less than the previously
-// returned revision.
-type kvOrdering struct {
- clientv3.KV
- orderViolationFunc OrderViolationFunc
- prevRev int64
- revMu sync.RWMutex
-}
-
-func NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering {
- return &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}}
-}
-
-func (kv *kvOrdering) getPrevRev() int64 {
- kv.revMu.RLock()
- defer kv.revMu.RUnlock()
- return kv.prevRev
-}
-
-func (kv *kvOrdering) setPrevRev(currRev int64) {
- kv.revMu.Lock()
- defer kv.revMu.Unlock()
- if currRev > kv.prevRev {
- kv.prevRev = currRev
- }
-}
-
-func (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
- // prevRev is stored in a local variable in order to record the prevRev
- // at the beginning of the Get operation, because concurrent
- // access to kvOrdering could change the prevRev field in the
- // middle of the Get operation.
- prevRev := kv.getPrevRev()
- op := clientv3.OpGet(key, opts...)
- for {
- r, err := kv.KV.Do(ctx, op)
- if err != nil {
- return nil, err
- }
- resp := r.Get()
- if resp.Header.Revision == prevRev {
- return resp, nil
- } else if resp.Header.Revision > prevRev {
- kv.setPrevRev(resp.Header.Revision)
- return resp, nil
- }
- err = kv.orderViolationFunc(op, r, prevRev)
- if err != nil {
- return nil, err
- }
- }
-}
-
-func (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn {
- return &txnOrdering{
- kv.KV.Txn(ctx),
- kv,
- ctx,
- sync.Mutex{},
- []clientv3.Cmp{},
- []clientv3.Op{},
- []clientv3.Op{},
- }
-}
-
-// txnOrdering ensures that serialized requests do not return
-// txn responses with revisions less than the previously
-// returned revision.
-type txnOrdering struct {
- clientv3.Txn
- *kvOrdering
- ctx context.Context
- mu sync.Mutex
- cmps []clientv3.Cmp
- thenOps []clientv3.Op
- elseOps []clientv3.Op
-}
-
-func (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
- txn.cmps = cs
- txn.Txn.If(cs...)
- return txn
-}
-
-func (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
- txn.thenOps = ops
- txn.Txn.Then(ops...)
- return txn
-}
-
-func (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
- txn.elseOps = ops
- txn.Txn.Else(ops...)
- return txn
-}
-
-func (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) {
- // prevRev is stored in a local variable in order to record the prevRev
- // at the beginning of the Commit operation, because concurrent
- // access to txnOrdering could change the prevRev field in the
- // middle of the Commit operation.
- prevRev := txn.getPrevRev()
- opTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps)
- for {
- opResp, err := txn.KV.Do(txn.ctx, opTxn)
- if err != nil {
- return nil, err
- }
- txnResp := opResp.Txn()
- if txnResp.Header.Revision >= prevRev {
- txn.setPrevRev(txnResp.Header.Revision)
- return txnResp, nil
- }
- err = txn.orderViolationFunc(opTxn, opResp, prevRev)
- if err != nil {
- return nil, err
- }
- }
-}
diff --git a/client/v3/ordering/kv_test.go b/client/v3/ordering/kv_test.go
deleted file mode 100644
index 2168c315752..00000000000
--- a/client/v3/ordering/kv_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ordering
-
-import (
- "context"
- gContext "context"
- "sync"
- "testing"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-type mockKV struct {
- clientv3.KV
- response clientv3.OpResponse
-}
-
-func (kv *mockKV) Do(ctx gContext.Context, op clientv3.Op) (clientv3.OpResponse, error) {
- return kv.response, nil
-}
-
-var rangeTests = []struct {
- prevRev int64
- response *clientv3.GetResponse
-}{
- {
- 5,
- &clientv3.GetResponse{
- Header: &pb.ResponseHeader{
- Revision: 5,
- },
- },
- },
- {
- 5,
- &clientv3.GetResponse{
- Header: &pb.ResponseHeader{
- Revision: 4,
- },
- },
- },
- {
- 5,
- &clientv3.GetResponse{
- Header: &pb.ResponseHeader{
- Revision: 6,
- },
- },
- },
-}
-
-func TestKvOrdering(t *testing.T) {
- for i, tt := range rangeTests {
- mKV := &mockKV{clientv3.NewKVFromKVClient(nil, nil), tt.response.OpResponse()}
- kv := &kvOrdering{
- mKV,
- func(r *clientv3.GetResponse) OrderViolationFunc {
- return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
- r.Header.Revision++
- return nil
- }
- }(tt.response),
- tt.prevRev,
- sync.RWMutex{},
- }
- res, err := kv.Get(context.TODO(), "mockKey")
- if err != nil {
- t.Errorf("#%d: expected response %+v, got error %+v", i, tt.response, err)
- }
- if rev := res.Header.Revision; rev < tt.prevRev {
- t.Errorf("#%d: expected revision %d, got %d", i, tt.prevRev, rev)
- }
- }
-}
-
-var txnTests = []struct {
- prevRev int64
- response *clientv3.TxnResponse
-}{
- {
- 5,
- &clientv3.TxnResponse{
- Header: &pb.ResponseHeader{
- Revision: 5,
- },
- },
- },
- {
- 5,
- &clientv3.TxnResponse{
- Header: &pb.ResponseHeader{
- Revision: 8,
- },
- },
- },
- {
- 5,
- &clientv3.TxnResponse{
- Header: &pb.ResponseHeader{
- Revision: 4,
- },
- },
- },
-}
-
-func TestTxnOrdering(t *testing.T) {
- for i, tt := range txnTests {
- mKV := &mockKV{clientv3.NewKVFromKVClient(nil, nil), tt.response.OpResponse()}
- kv := &kvOrdering{
- mKV,
- func(r *clientv3.TxnResponse) OrderViolationFunc {
- return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
- r.Header.Revision++
- return nil
- }
- }(tt.response),
- tt.prevRev,
- sync.RWMutex{},
- }
- txn := &txnOrdering{
- kv.Txn(context.Background()),
- kv,
- context.Background(),
- sync.Mutex{},
- []clientv3.Cmp{},
- []clientv3.Op{},
- []clientv3.Op{},
- }
- res, err := txn.Commit()
- if err != nil {
- t.Errorf("#%d: expected response %+v, got error %+v", i, tt.response, err)
- }
- if rev := res.Header.Revision; rev < tt.prevRev {
- t.Errorf("#%d: expected revision %d, got %d", i, tt.prevRev, rev)
- }
- }
-}
diff --git a/client/v3/ordering/util.go b/client/v3/ordering/util.go
deleted file mode 100644
index 701cc709616..00000000000
--- a/client/v3/ordering/util.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ordering
-
-import (
- "errors"
- "sync/atomic"
-
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error
-
-var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision")
-
-func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFunc {
- violationCount := int32(0)
- return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error {
-		// Each request is assigned by the round-robin load-balancer's picker to a
-		// different endpoint. If we cycle through them 5 times (even with some level
-		// of concurrency), with high probability no endpoint points to a member with
-		// fresh data.
- // TODO: Ideally we should track members (resp.opp.Header) that returned
- // stale result and explicitly temporarily disable them in 'picker'.
- if atomic.LoadInt32(&violationCount) > int32(5*len(c.Endpoints())) {
- return ErrNoGreaterRev
- }
- atomic.AddInt32(&violationCount, 1)
- return nil
- }
-}
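Putting the pieces of this package together, a hedged sketch: wrap the client's KV with the ordering cache and use the switch-endpoint closure above as the violation policy, so stale serializable reads are retried against other members.

package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/ordering"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		// Several endpoints, so the closure has other members to fall back to.
		Endpoints: []string{"10.0.0.1:2379", "10.0.0.2:2379", "10.0.0.3:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Reads served by a lagging member retry on other endpoints until a response
	// at least as new as the last observed revision arrives, or ErrNoGreaterRev
	// is returned after cycling through the endpoints.
	cli.KV = ordering.NewKV(cli.KV, ordering.NewOrderViolationSwitchEndpointClosure(cli))

	if _, err := cli.Get(context.TODO(), "k", clientv3.WithSerializable()); err != nil {
		log.Fatal(err)
	}
}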
diff --git a/client/v3/retry_interceptor_test.go b/client/v3/retry_interceptor_test.go
deleted file mode 100644
index 6746f10adca..00000000000
--- a/client/v3/retry_interceptor_test.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "testing"
-
- grpccredentials "google.golang.org/grpc/credentials"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/v3/credentials"
-)
-
-type dummyAuthTokenBundle struct{}
-
-func (d dummyAuthTokenBundle) TransportCredentials() grpccredentials.TransportCredentials {
- return nil
-}
-
-func (d dummyAuthTokenBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
- return nil
-}
-
-func (d dummyAuthTokenBundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
- return nil, nil
-}
-
-func (d dummyAuthTokenBundle) UpdateAuthToken(token string) {
-}
-
-func TestClientShouldRefreshToken(t *testing.T) {
- type fields struct {
- authTokenBundle credentials.Bundle
- }
- type args struct {
- err error
- callOpts *options
- }
-
- optsWithTrue := &options{
- retryAuth: true,
- }
- optsWithFalse := &options{
- retryAuth: false,
- }
-
- tests := []struct {
- name string
- fields fields
- args args
- want bool
- }{
- {
- name: "ErrUserEmpty and non nil authTokenBundle",
- fields: fields{
- authTokenBundle: &dummyAuthTokenBundle{},
- },
- args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
- want: true,
- },
- {
- name: "ErrUserEmpty and nil authTokenBundle",
- fields: fields{
- authTokenBundle: nil,
- },
- args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
- want: false,
- },
- {
- name: "ErrGRPCInvalidAuthToken and retryAuth",
- fields: fields{
- authTokenBundle: nil,
- },
- args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithTrue},
- want: true,
- },
- {
- name: "ErrGRPCInvalidAuthToken and !retryAuth",
- fields: fields{
- authTokenBundle: nil,
- },
- args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithFalse},
- want: false,
- },
- {
- name: "ErrGRPCAuthOldRevision and retryAuth",
- fields: fields{
- authTokenBundle: nil,
- },
- args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithTrue},
- want: true,
- },
- {
- name: "ErrGRPCAuthOldRevision and !retryAuth",
- fields: fields{
- authTokenBundle: nil,
- },
- args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithFalse},
- want: false,
- },
- {
- name: "Other error and retryAuth",
- fields: fields{
- authTokenBundle: nil,
- },
- args: args{rpctypes.ErrGRPCAuthFailed, optsWithTrue},
- want: false,
- },
- {
- name: "Other error and !retryAuth",
- fields: fields{
- authTokenBundle: nil,
- },
- args: args{rpctypes.ErrGRPCAuthFailed, optsWithFalse},
- want: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- c := &Client{
- authTokenBundle: tt.fields.authTokenBundle,
- }
- if got := c.shouldRefreshToken(tt.args.err, tt.args.callOpts); got != tt.want {
- t.Errorf("shouldRefreshToken() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/client/v3/snapshot/v3_snapshot.go b/client/v3/snapshot/v3_snapshot.go
deleted file mode 100644
index 3e36198422e..00000000000
--- a/client/v3/snapshot/v3_snapshot.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package snapshot
-
-import (
- "context"
- "crypto/sha256"
- "fmt"
- "io"
- "os"
- "time"
-
- "github.com/dustin/go-humanize"
- "go.uber.org/zap"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-// hasChecksum returns "true" if the file size "n"
-// has an appended sha256 hash digest.
-func hasChecksum(n int64) bool {
- // 512 is chosen because it's a minimum disk sector size
- // smaller than (and multiplies to) OS page size in most systems
- return (n % 512) == sha256.Size
-}
-
-// SaveWithVersion fetches snapshot from remote etcd server, saves data
-// to target path and returns server version. If the context "ctx" is canceled or timed out,
-// snapshot save stream will error out (e.g. context.Canceled,
-// context.DeadlineExceeded). Make sure to specify only one endpoint
-// in client configuration. Snapshot API must be requested to a
-// selected node, and saved snapshot is the point-in-time state of
-// the selected node.
-// Etcd ", v1),
-// Compare(Version(k1), "=", 2)
-// ).Then(
-// OpPut(k2,v2), OpPut(k3,v3)
-// ).Else(
-// OpPut(k4,v4), OpPut(k5,v5)
-// ).Commit()
-type Txn interface {
-	// If takes a list of comparisons. If all comparisons passed in succeed,
-	// the operations passed into Then() will be executed. Otherwise the operations
-	// passed into Else() will be executed.
- If(cs ...Cmp) Txn
-
- // Then takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() succeed.
- Then(ops ...Op) Txn
-
- // Else takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() fail.
- Else(ops ...Op) Txn
-
- // Commit tries to commit the transaction.
- Commit() (*TxnResponse, error)
-}
-
-type txn struct {
- kv *kv
- ctx context.Context
-
- mu sync.Mutex
- cif bool
- cthen bool
- celse bool
-
- isWrite bool
-
- cmps []*pb.Compare
-
- sus []*pb.RequestOp
- fas []*pb.RequestOp
-
- callOpts []grpc.CallOption
-}
-
-func (txn *txn) If(cs ...Cmp) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cif {
- panic("cannot call If twice!")
- }
-
- if txn.cthen {
- panic("cannot call If after Then!")
- }
-
- if txn.celse {
- panic("cannot call If after Else!")
- }
-
- txn.cif = true
-
- for i := range cs {
- txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
- }
-
- return txn
-}
-
-func (txn *txn) Then(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cthen {
- panic("cannot call Then twice!")
- }
- if txn.celse {
- panic("cannot call Then after Else!")
- }
-
- txn.cthen = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.sus = append(txn.sus, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Else(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.celse {
- panic("cannot call Else twice!")
- }
-
- txn.celse = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.fas = append(txn.fas, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Commit() (*TxnResponse, error) {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
-
- var resp *pb.TxnResponse
- var err error
- resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
- if err != nil {
- return nil, toErr(txn.ctx, err)
- }
- return (*TxnResponse)(resp), nil
-}
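As a sketch of what the implementation above executes (the keys and values are made up, not taken from this change), a typical client-side transaction looks like:

package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Create "users/42" only if it does not exist yet; otherwise read it back.
	resp, err := cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.CreateRevision("users/42"), "=", 0)).
		Then(clientv3.OpPut("users/42", `{"name":"x"}`)).
		Else(clientv3.OpGet("users/42")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("created:", resp.Succeeded)
}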
diff --git a/client/v3/txn_test.go b/client/v3/txn_test.go
deleted file mode 100644
index 0ee6e71d6be..00000000000
--- a/client/v3/txn_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "testing"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/testutil"
-)
-
-func TestTxnPanics(t *testing.T) {
- testutil.RegisterLeakDetection(t)
-
- kv := &kv{}
-
- errc := make(chan string, 6)
- df := func() {
- if s := recover(); s != nil {
- errc <- s.(string)
- }
- }
-
- cmp := Compare(CreateRevision("foo"), "=", 0)
- op := OpPut("foo", "bar")
-
- tests := []struct {
- f func()
-
- err string
- }{
- {
- f: func() {
- defer df()
- kv.Txn(context.TODO()).If(cmp).If(cmp)
- },
-
- err: "cannot call If twice!",
- },
- {
- f: func() {
- defer df()
- kv.Txn(context.TODO()).Then(op).If(cmp)
- },
-
- err: "cannot call If after Then!",
- },
- {
- f: func() {
- defer df()
- kv.Txn(context.TODO()).Else(op).If(cmp)
- },
-
- err: "cannot call If after Else!",
- },
- {
- f: func() {
- defer df()
- kv.Txn(context.TODO()).Then(op).Then(op)
- },
-
- err: "cannot call Then twice!",
- },
- {
- f: func() {
- defer df()
- kv.Txn(context.TODO()).Else(op).Then(op)
- },
-
- err: "cannot call Then after Else!",
- },
- {
- f: func() {
- defer df()
- kv.Txn(context.TODO()).Else(op).Else(op)
- },
-
- err: "cannot call Else twice!",
- },
- }
-
- for i, tt := range tests {
- go tt.f()
- select {
- case err := <-errc:
- if err != tt.err {
- t.Errorf("#%d: got %s, wanted %s", i, err, tt.err)
- }
- case <-time.After(time.Second):
- t.Errorf("#%d: did not panic, wanted panic %s", i, tt.err)
- }
- }
-}
diff --git a/client/v3/utils.go b/client/v3/utils.go
deleted file mode 100644
index 850275877d3..00000000000
--- a/client/v3/utils.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "math/rand"
- "time"
-)
-
-// jitterUp adds random jitter to the duration.
-//
-// This adds or subtracts time from the duration within a given jitter fraction.
-// For example for 10s and jitter 0.1, it will return a time within [9s, 11s])
-//
-// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
-func jitterUp(duration time.Duration, jitter float64) time.Duration {
- multiplier := jitter * (rand.Float64()*2 - 1)
- return time.Duration(float64(duration) * (1 + multiplier))
-}
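A worked illustration of the formula above, with the jitter logic reimplemented locally since jitterUp is unexported: with the default 25ms backoff and 0.10 jitter fraction, every wait lands in [22.5ms, 27.5ms].

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jittered mirrors jitterUp for illustration only.
func jittered(d time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1) // uniform in [-jitter, +jitter]
	return time.Duration(float64(d) * (1 + multiplier))
}

func main() {
	for i := 0; i < 3; i++ {
		// With 25ms and 0.10 these prints stay within [22.5ms, 27.5ms].
		fmt.Println(jittered(25*time.Millisecond, 0.10))
	}
}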
diff --git a/client/v3/watch.go b/client/v3/watch.go
deleted file mode 100644
index 276955cd6e5..00000000000
--- a/client/v3/watch.go
+++ /dev/null
@@ -1,1074 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-)
-
-const (
- EventTypeDelete = mvccpb.DELETE
- EventTypePut = mvccpb.PUT
-
- closeSendErrTimeout = 250 * time.Millisecond
-
- // AutoWatchID is the watcher ID passed in WatchStream.Watch when no
-	// user-provided ID is available. If passed, an ID will be assigned automatically.
- AutoWatchID = 0
-
- // InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
- InvalidWatchID = -1
-)
-
-var (
- errMsgGRPCInvalidAuthToken = v3rpc.ErrGRPCInvalidAuthToken.Error()
- errMsgGRPCAuthOldRevision = v3rpc.ErrGRPCAuthOldRevision.Error()
-)
-
-type Event mvccpb.Event
-
-type WatchChan <-chan WatchResponse
-
-type Watcher interface {
- // Watch watches on a key or prefix. The watched events will be returned
- // through the returned channel. If revisions waiting to be sent over the
- // watch are compacted, then the watch will be canceled by the server, the
- // client will post a compacted error watch response, and the channel will close.
- // If the requested revision is 0 or unspecified, the returned channel will
- // return watch events that happen after the server receives the watch request.
- // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
- // and "WatchResponse" from this closed channel has zero events and nil "Err()".
- // The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
- // to release the associated resources.
- //
- // If the context is "context.Background/TODO", returned "WatchChan" will
- // not be closed and block until event is triggered, except when server
- // returns a non-recoverable error (e.g. ErrCompacted).
- // For example, when context passed with "WithRequireLeader" and the
- // connected server has no leader (e.g. due to network partition),
- // error "etcdserver: no leader" (ErrNoLeader) will be returned,
- // and then "WatchChan" is closed with non-nil "Err()".
- // In order to prevent a watch stream being stuck in a partitioned node,
- // make sure to wrap context with "WithRequireLeader".
- //
- // Otherwise, as long as the context has not been canceled or timed out,
- // watch will retry on other recoverable errors forever until reconnected.
- //
- // TODO: explicitly set context error in the last "WatchResponse" message and close channel?
- // Currently, client contexts are overwritten with "valCtx" that never closes.
- // TODO(v3.4): configure watch retry policy, limit maximum retry number
- // (see https://github.com/etcd-io/etcd/issues/8980)
- Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
-
- // RequestProgress requests a progress notify response be sent in all watch channels.
- RequestProgress(ctx context.Context) error
-
- // Close closes the watcher and cancels all watch requests.
- Close() error
-}
-
-type WatchResponse struct {
- Header pb.ResponseHeader
- Events []*Event
-
- // CompactRevision is the minimum revision the watcher may receive.
- CompactRevision int64
-
- // Canceled is used to indicate watch failure.
- // If the watch failed and the stream was about to close, before the channel is closed,
- // the channel sends a final response that has Canceled set to true with a non-nil Err().
- Canceled bool
-
- // Created is used to indicate the creation of the watcher.
- Created bool
-
- closeErr error
-
- // cancelReason is a reason of canceling watch
- cancelReason string
-}
-
-// IsCreate returns true if the event tells that the key is newly created.
-func (e *Event) IsCreate() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
-}
-
-// IsModify returns true if the event tells that a new value is put on existing key.
-func (e *Event) IsModify() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
-}
-
-// Err is the error value if this WatchResponse holds an error.
-func (wr *WatchResponse) Err() error {
- switch {
- case wr.closeErr != nil:
- return v3rpc.Error(wr.closeErr)
- case wr.CompactRevision != 0:
- return v3rpc.ErrCompacted
- case wr.Canceled:
- if len(wr.cancelReason) != 0 {
- return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
- }
- return v3rpc.ErrFutureRev
- }
- return nil
-}
-
-// IsProgressNotify returns true if the WatchResponse is progress notification.
-func (wr *WatchResponse) IsProgressNotify() bool {
- return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
-}
-
-// watcher implements the Watcher interface
-type watcher struct {
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // mu protects the grpc streams map
- mu sync.Mutex
-
- // streams holds all the active grpc streams keyed by ctx value.
- streams map[string]*watchGrpcStream
- lg *zap.Logger
-}
-
-// watchGrpcStream tracks all watch resources attached to a single grpc stream.
-type watchGrpcStream struct {
- owner *watcher
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // ctx controls internal remote.Watch requests
- ctx context.Context
- // ctxKey is the key used when looking up this stream's context
- ctxKey string
- cancel context.CancelFunc
-
- // substreams holds all active watchers on this grpc stream
- substreams map[int64]*watcherStream
- // resuming holds all resuming watchers on this grpc stream
- resuming []*watcherStream
-
- // reqc sends a watch request from Watch() to the main goroutine
- reqc chan watchStreamRequest
- // respc receives data from the watch client
- respc chan *pb.WatchResponse
- // donec closes to broadcast shutdown
- donec chan struct{}
- // errc transmits errors from grpc Recv to the watch stream reconnect logic
- errc chan error
- // closingc gets the watcherStream of closing watchers
- closingc chan *watcherStream
- // wg is Done when all substream goroutines have exited
- wg sync.WaitGroup
-
- // resumec closes to signal that all substreams should begin resuming
- resumec chan struct{}
- // closeErr is the error that closed the watch stream
- closeErr error
-
- lg *zap.Logger
-}
-
-// watchStreamRequest is a union of the supported watch request operation types
-type watchStreamRequest interface {
- toPB() *pb.WatchRequest
-}
-
-// watchRequest is issued by the subscriber to start a new watcher
-type watchRequest struct {
- ctx context.Context
- key string
- end string
- rev int64
-
- // send created notification event if this field is true
- createdNotify bool
- // progressNotify is for progress updates
- progressNotify bool
- // fragmentation should be disabled by default
- // if true, split watch events when total exceeds
- // "--max-request-bytes" flag value + 512-byte
- fragment bool
-
- // filters is the list of events to filter out
- filters []pb.WatchCreateRequest_FilterType
- // get the previous key-value pair before the event happens
- prevKV bool
- // retc receives a chan WatchResponse once the watcher is established
- retc chan chan WatchResponse
-}
-
-// progressRequest is issued by the subscriber to request watch progress
-type progressRequest struct {
-}
-
-// watcherStream represents a registered watcher
-type watcherStream struct {
- // initReq is the request that initiated this request
- initReq watchRequest
-
- // outc publishes watch responses to subscriber
- outc chan WatchResponse
- // recvc buffers watch responses before publishing
- recvc chan *WatchResponse
- // donec closes when the watcherStream goroutine stops.
- donec chan struct{}
- // closing is set to true when stream should be scheduled to shutdown.
- closing bool
- // id is the registered watch id on the grpc stream
- id int64
-
- // buf holds all events received from etcd but not yet consumed by the client
- buf []*WatchResponse
-}
-
-func NewWatcher(c *Client) Watcher {
- return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
-}
-
-func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
- w := &watcher{
- remote: wc,
- streams: make(map[string]*watchGrpcStream),
- }
- if c != nil {
- w.callOpts = c.callOpts
- w.lg = c.lg
- }
- return w
-}
-
-// never closes
-var valCtxCh = make(chan struct{})
-var zeroTime = time.Unix(0, 0)
-
-// ctx with only the values; never Done
-type valCtx struct{ context.Context }
-
-func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
-func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
-func (vc *valCtx) Err() error { return nil }
-
-func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
- ctx, cancel := context.WithCancel(&valCtx{inctx})
- wgs := &watchGrpcStream{
- owner: w,
- remote: w.remote,
- callOpts: w.callOpts,
- ctx: ctx,
- ctxKey: streamKeyFromCtx(inctx),
- cancel: cancel,
- substreams: make(map[int64]*watcherStream),
- respc: make(chan *pb.WatchResponse),
- reqc: make(chan watchStreamRequest),
- donec: make(chan struct{}),
- errc: make(chan error, 1),
- closingc: make(chan *watcherStream),
- resumec: make(chan struct{}),
- lg: w.lg,
- }
- go wgs.run()
- return wgs
-}
-
-// Watch posts a watch request to run() and waits for a new watcher channel
-func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
- ow := opWatch(key, opts...)
-
- var filters []pb.WatchCreateRequest_FilterType
- if ow.filterPut {
- filters = append(filters, pb.WatchCreateRequest_NOPUT)
- }
- if ow.filterDelete {
- filters = append(filters, pb.WatchCreateRequest_NODELETE)
- }
-
- wr := &watchRequest{
- ctx: ctx,
- createdNotify: ow.createdNotify,
- key: string(ow.key),
- end: string(ow.end),
- rev: ow.rev,
- progressNotify: ow.progressNotify,
- fragment: ow.fragment,
- filters: filters,
- prevKV: ow.prevKV,
- retc: make(chan chan WatchResponse, 1),
- }
-
- ok := false
- ctxKey := streamKeyFromCtx(ctx)
-
- var closeCh chan WatchResponse
- for {
- // find or allocate appropriate grpc watch stream
- w.mu.Lock()
- if w.streams == nil {
- // closed
- w.mu.Unlock()
- ch := make(chan WatchResponse)
- close(ch)
- return ch
- }
- wgs := w.streams[ctxKey]
- if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
- w.streams[ctxKey] = wgs
- }
- donec := wgs.donec
- reqc := wgs.reqc
- w.mu.Unlock()
-
- // couldn't create channel; return closed channel
- if closeCh == nil {
- closeCh = make(chan WatchResponse, 1)
- }
-
- // submit request
- select {
- case reqc <- wr:
- ok = true
- case <-wr.ctx.Done():
- ok = false
- case <-donec:
- ok = false
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- continue
- }
-
- // receive channel
- if ok {
- select {
- case ret := <-wr.retc:
- return ret
- case <-ctx.Done():
- case <-donec:
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- continue
- }
- }
- break
- }
-
- close(closeCh)
- return closeCh
-}
-
-func (w *watcher) Close() (err error) {
- w.mu.Lock()
- streams := w.streams
- w.streams = nil
- w.mu.Unlock()
- for _, wgs := range streams {
- if werr := wgs.close(); werr != nil {
- err = werr
- }
- }
- // Consider context.Canceled as a successful close
- if err == context.Canceled {
- err = nil
- }
- return err
-}
-
-// RequestProgress requests a progress notify response be sent in all watch channels.
-func (w *watcher) RequestProgress(ctx context.Context) (err error) {
- ctxKey := streamKeyFromCtx(ctx)
-
- w.mu.Lock()
- if w.streams == nil {
- w.mu.Unlock()
- return errors.New("no stream found for context")
- }
- wgs := w.streams[ctxKey]
- if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
- w.streams[ctxKey] = wgs
- }
- donec := wgs.donec
- reqc := wgs.reqc
- w.mu.Unlock()
-
- pr := &progressRequest{}
-
- select {
- case reqc <- pr:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- case <-donec:
- if wgs.closeErr != nil {
- return wgs.closeErr
- }
- // retry; may have dropped stream from no ctxs
- return w.RequestProgress(ctx)
- }
-}
-
-func (w *watchGrpcStream) close() (err error) {
- w.cancel()
- <-w.donec
- select {
- case err = <-w.errc:
- default:
- }
- return toErr(w.ctx, err)
-}
-
-func (w *watcher) closeStream(wgs *watchGrpcStream) {
- w.mu.Lock()
- close(wgs.donec)
- wgs.cancel()
- if w.streams != nil {
- delete(w.streams, wgs.ctxKey)
- }
- w.mu.Unlock()
-}
-
-func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
- // check watch ID for backward compatibility (<= v3.3)
- if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
- w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
- // failed; no channel
- close(ws.recvc)
- return
- }
- ws.id = resp.WatchId
- w.substreams[ws.id] = ws
-}
-
-func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
- select {
- case ws.outc <- *resp:
- case <-ws.initReq.ctx.Done():
- case <-time.After(closeSendErrTimeout):
- }
- close(ws.outc)
-}
-
-func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
- // send channel response in case stream was never established
- select {
- case ws.initReq.retc <- ws.outc:
- default:
- }
- // close subscriber's channel
- if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
- go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
- } else if ws.outc != nil {
- close(ws.outc)
- }
- if ws.id != InvalidWatchID {
- delete(w.substreams, ws.id)
- return
- }
- for i := range w.resuming {
- if w.resuming[i] == ws {
- w.resuming[i] = nil
- return
- }
- }
-}
-
-// run is the root of the goroutines for managing a watcher client
-func (w *watchGrpcStream) run() {
- var wc pb.Watch_WatchClient
- var closeErr error
-
- // substreams marked to close but goroutine still running; needed for
- // avoiding double-closing recvc on grpc stream teardown
- closing := make(map[*watcherStream]struct{})
-
- defer func() {
- w.closeErr = closeErr
- // shutdown substreams and resuming substreams
- for _, ws := range w.substreams {
- if _, ok := closing[ws]; !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- for _, ws := range w.resuming {
- if _, ok := closing[ws]; ws != nil && !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- w.joinSubstreams()
- for range closing {
- w.closeSubstream(<-w.closingc)
- }
- w.wg.Wait()
- w.owner.closeStream(w)
- }()
-
- // start a stream with the etcd grpc server
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
-
- cancelSet := make(map[int64]struct{})
-
- var cur *pb.WatchResponse
- backoff := time.Millisecond
- for {
- select {
- // Watch() requested
- case req := <-w.reqc:
- switch wreq := req.(type) {
- case *watchRequest:
- outc := make(chan WatchResponse, 1)
- // TODO: pass custom watch ID?
- ws := &watcherStream{
- initReq: *wreq,
- id: InvalidWatchID,
- outc: outc,
- // unbuffered so resumes won't cause repeat events
- recvc: make(chan *WatchResponse),
- }
-
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
-
- // queue up for watcher creation/resume
- w.resuming = append(w.resuming, ws)
- if len(w.resuming) == 1 {
- // head of resume queue, can register a new watcher
- if err := wc.Send(ws.initReq.toPB()); err != nil {
- w.lg.Debug("error when sending request", zap.Error(err))
- }
- }
- case *progressRequest:
- if err := wc.Send(wreq.toPB()); err != nil {
- w.lg.Debug("error when sending request", zap.Error(err))
- }
- }
-
- // new events from the watch client
- case pbresp := <-w.respc:
- if cur == nil || pbresp.Created || pbresp.Canceled {
- cur = pbresp
- } else if cur != nil && cur.WatchId == pbresp.WatchId {
- // merge new events
- cur.Events = append(cur.Events, pbresp.Events...)
- // update "Fragment" field; last response with "Fragment" == false
- cur.Fragment = pbresp.Fragment
- }
-
- switch {
- case pbresp.Created:
- if pbresp.Canceled && shouldRetryWatch(pbresp.CancelReason) {
- var newErr error
- if wc, newErr = w.newWatchClient(); newErr != nil {
- w.lg.Error("failed to create a new watch client", zap.Error(newErr))
- return
- }
-
- if len(w.resuming) != 0 {
- if ws := w.resuming[0]; ws != nil {
- if err := wc.Send(ws.initReq.toPB()); err != nil {
- w.lg.Debug("error when sending request", zap.Error(err))
- }
- }
- }
-
- cur = nil
- continue
- }
-
- // response to head of queue creation
- if len(w.resuming) != 0 {
- if ws := w.resuming[0]; ws != nil {
- w.addSubstream(pbresp, ws)
- w.dispatchEvent(pbresp)
- w.resuming[0] = nil
- }
- }
-
- if ws := w.nextResume(); ws != nil {
- if err := wc.Send(ws.initReq.toPB()); err != nil {
- w.lg.Debug("error when sending request", zap.Error(err))
- }
- }
-
- // reset for next iteration
- cur = nil
-
- case pbresp.Canceled && pbresp.CompactRevision == 0:
- delete(cancelSet, pbresp.WatchId)
- if ws, ok := w.substreams[pbresp.WatchId]; ok {
- // signal to stream goroutine to update closingc
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
-
- // reset for next iteration
- cur = nil
-
- case cur.Fragment:
- // watch response events are still fragmented
- // continue to fetch next fragmented event arrival
- continue
-
- default:
- // dispatch to appropriate watch stream
- ok := w.dispatchEvent(cur)
-
- // reset for next iteration
- cur = nil
-
- if ok {
- break
- }
-
- // watch response on unexpected watch id; cancel id
- if _, ok := cancelSet[pbresp.WatchId]; ok {
- break
- }
-
- cancelSet[pbresp.WatchId] = struct{}{}
- cr := &pb.WatchRequest_CancelRequest{
- CancelRequest: &pb.WatchCancelRequest{
- WatchId: pbresp.WatchId,
- },
- }
- req := &pb.WatchRequest{RequestUnion: cr}
- w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
- if err := wc.Send(req); err != nil {
- w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
- }
- }
-
- // watch client failed on Recv; spawn another if possible
- case err := <-w.errc:
- if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
- closeErr = err
- return
- }
- backoff = w.backoffIfUnavailable(backoff, err)
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
- if ws := w.nextResume(); ws != nil {
- if err := wc.Send(ws.initReq.toPB()); err != nil {
- w.lg.Debug("error when sending request", zap.Error(err))
- }
- }
- cancelSet = make(map[int64]struct{})
-
- case <-w.ctx.Done():
- return
-
- case ws := <-w.closingc:
- w.closeSubstream(ws)
- delete(closing, ws)
- // no more watchers on this stream, shutdown, skip cancellation
- if len(w.substreams)+len(w.resuming) == 0 {
- return
- }
- if ws.id != InvalidWatchID {
- // client is closing an established watch; close it on the server proactively instead of waiting
- // to close when the next message arrives
- cancelSet[ws.id] = struct{}{}
- cr := &pb.WatchRequest_CancelRequest{
- CancelRequest: &pb.WatchCancelRequest{
- WatchId: ws.id,
- },
- }
- req := &pb.WatchRequest{RequestUnion: cr}
- w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
- if err := wc.Send(req); err != nil {
- w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
- }
- }
- }
- }
-}
-
-func shouldRetryWatch(cancelReason string) bool {
- if cancelReason == "" {
- return false
- }
- return (cancelReason == errMsgGRPCInvalidAuthToken) ||
- (cancelReason == errMsgGRPCAuthOldRevision)
-}
-
-// nextResume chooses the next resuming to register with the grpc stream. Abandoned
-// streams are marked as nil in the queue since the head must wait for its inflight registration.
-func (w *watchGrpcStream) nextResume() *watcherStream {
- for len(w.resuming) != 0 {
- if w.resuming[0] != nil {
- return w.resuming[0]
- }
- w.resuming = w.resuming[1:len(w.resuming)]
- }
- return nil
-}
-
-// dispatchEvent sends a WatchResponse to the appropriate watcher stream
-func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
- events := make([]*Event, len(pbresp.Events))
- for i, ev := range pbresp.Events {
- events[i] = (*Event)(ev)
- }
- // TODO: return watch ID?
- wr := &WatchResponse{
- Header: *pbresp.Header,
- Events: events,
- CompactRevision: pbresp.CompactRevision,
- Created: pbresp.Created,
- Canceled: pbresp.Canceled,
- cancelReason: pbresp.CancelReason,
- }
-
- // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to
- // indicate they should be broadcast.
- if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
- return w.broadcastResponse(wr)
- }
-
- return w.unicastResponse(wr, pbresp.WatchId)
-
-}
-
-// broadcastResponse send a watch response to all watch substreams.
-func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
- for _, ws := range w.substreams {
- select {
- case ws.recvc <- wr:
- case <-ws.donec:
- }
- }
- return true
-}
-
-// unicastResponse sends a watch response to a specific watch substream.
-func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
- ws, ok := w.substreams[watchId]
- if !ok {
- return false
- }
- select {
- case ws.recvc <- wr:
- case <-ws.donec:
- return false
- }
- return true
-}
-
-// serveWatchClient forwards messages from the grpc stream to run()
-func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
- for {
- resp, err := wc.Recv()
- if err != nil {
- select {
- case w.errc <- err:
- case <-w.donec:
- }
- return
- }
- select {
- case w.respc <- resp:
- case <-w.donec:
- return
- }
- }
-}
-
-// serveSubstream forwards watch responses from run() to the subscriber
-func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
- if ws.closing {
- panic("created substream goroutine but substream is closing")
- }
-
- // nextRev is the minimum expected next revision
- nextRev := ws.initReq.rev
- resuming := false
- defer func() {
- if !resuming {
- ws.closing = true
- }
- close(ws.donec)
- if !resuming {
- w.closingc <- ws
- }
- w.wg.Done()
- }()
-
- emptyWr := &WatchResponse{}
- for {
- curWr := emptyWr
- outc := ws.outc
-
- if len(ws.buf) > 0 {
- curWr = ws.buf[0]
- } else {
- outc = nil
- }
- select {
- case outc <- *curWr:
- if ws.buf[0].Err() != nil {
- return
- }
- ws.buf[0] = nil
- ws.buf = ws.buf[1:]
- case wr, ok := <-ws.recvc:
- if !ok {
- // shutdown from closeSubstream
- return
- }
-
- if wr.Created {
- if ws.initReq.retc != nil {
- ws.initReq.retc <- ws.outc
- // to prevent next write from taking the slot in buffered channel
- // and posting duplicate create events
- ws.initReq.retc = nil
-
- // send first creation event only if requested
- if ws.initReq.createdNotify {
- ws.outc <- *wr
- }
- // once the watch channel is returned, a current revision
- // watch must resume at the store revision. This is necessary
- // for the following case to work as expected:
- // wch := m1.Watch("a")
- // m2.Put("a", "b")
- // <-wch
- // If the revision is only bound on the first observed event,
- // if wch is disconnected before the Put is issued, then reconnects
- // after it is committed, it'll miss the Put.
- if ws.initReq.rev == 0 {
- nextRev = wr.Header.Revision
- }
- }
- } else {
- // current progress of watch; <= store revision
- nextRev = wr.Header.Revision
- }
-
- if len(wr.Events) > 0 {
- nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
- }
- ws.initReq.rev = nextRev
-
- // created event is already sent above,
- // watcher should not post duplicate events
- if wr.Created {
- continue
- }
-
- // TODO pause channel if buffer gets too large
- ws.buf = append(ws.buf, wr)
- case <-w.ctx.Done():
- return
- case <-ws.initReq.ctx.Done():
- return
- case <-resumec:
- resuming = true
- return
- }
- }
- // lazily send cancel message if events on missing id
-}
-
-func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
- // mark all substreams as resuming
- close(w.resumec)
- w.resumec = make(chan struct{})
- w.joinSubstreams()
- for _, ws := range w.substreams {
- ws.id = InvalidWatchID
- w.resuming = append(w.resuming, ws)
- }
- // strip out nils, if any
- var resuming []*watcherStream
- for _, ws := range w.resuming {
- if ws != nil {
- resuming = append(resuming, ws)
- }
- }
- w.resuming = resuming
- w.substreams = make(map[int64]*watcherStream)
-
- // connect to grpc stream while accepting watcher cancelation
- stopc := make(chan struct{})
- donec := w.waitCancelSubstreams(stopc)
- wc, err := w.openWatchClient()
- close(stopc)
- <-donec
-
- // serve all non-closing streams, even if there's a client error
- // so that the teardown path can shutdown the streams as expected.
- for _, ws := range w.resuming {
- if ws.closing {
- continue
- }
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
- }
-
- if err != nil {
- return nil, v3rpc.Error(err)
- }
-
- // receive data from new grpc stream
- go w.serveWatchClient(wc)
- return wc, nil
-}
-
-func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
- var wg sync.WaitGroup
- wg.Add(len(w.resuming))
- donec := make(chan struct{})
- for i := range w.resuming {
- go func(ws *watcherStream) {
- defer wg.Done()
- if ws.closing {
- if ws.initReq.ctx.Err() != nil && ws.outc != nil {
- close(ws.outc)
- ws.outc = nil
- }
- return
- }
- select {
- case <-ws.initReq.ctx.Done():
- // closed ws will be removed from resuming
- ws.closing = true
- close(ws.outc)
- ws.outc = nil
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- w.closingc <- ws
- }()
- case <-stopc:
- }
- }(w.resuming[i])
- }
- go func() {
- defer close(donec)
- wg.Wait()
- }()
- return donec
-}
-
-// joinSubstreams waits for all substream goroutines to complete.
-func (w *watchGrpcStream) joinSubstreams() {
- for _, ws := range w.substreams {
- <-ws.donec
- }
- for _, ws := range w.resuming {
- if ws != nil {
- <-ws.donec
- }
- }
-}
-
-var maxBackoff = 100 * time.Millisecond
-
-func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
- if isUnavailableErr(w.ctx, err) {
- // retry, but backoff
- if backoff < maxBackoff {
- // 25% backoff factor
- backoff = backoff + backoff/4
- if backoff > maxBackoff {
- backoff = maxBackoff
- }
- }
- time.Sleep(backoff)
- }
- return backoff
-}
-
-// openWatchClient retries opening a watch client until success or halt.
-// manually retry in case "ws==nil && err==nil"
-// TODO: remove FailFast=false
-func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
- backoff := time.Millisecond
- for {
- select {
- case <-w.ctx.Done():
- if err == nil {
- return nil, w.ctx.Err()
- }
- return nil, err
- default:
- }
- if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
- break
- }
- if isHaltErr(w.ctx, err) {
- return nil, v3rpc.Error(err)
- }
- backoff = w.backoffIfUnavailable(backoff, err)
- }
- return ws, nil
-}
-
-// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
-func (wr *watchRequest) toPB() *pb.WatchRequest {
- req := &pb.WatchCreateRequest{
- StartRevision: wr.rev,
- Key: []byte(wr.key),
- RangeEnd: []byte(wr.end),
- ProgressNotify: wr.progressNotify,
- Filters: wr.filters,
- PrevKv: wr.prevKV,
- Fragment: wr.fragment,
- }
- cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
- return &pb.WatchRequest{RequestUnion: cr}
-}
-
-// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
-func (pr *progressRequest) toPB() *pb.WatchRequest {
- req := &pb.WatchProgressRequest{}
- cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
- return &pb.WatchRequest{RequestUnion: cr}
-}
-
-func streamKeyFromCtx(ctx context.Context) string {
- if md, ok := metadata.FromOutgoingContext(ctx); ok {
- return fmt.Sprintf("%+v", md)
- }
- return ""
-}
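
For reference, a minimal, hedged sketch of how the Watcher interface removed above is typically consumed from application code. The endpoint, key, and prefix are illustrative assumptions, not part of this change; the `WithRequireLeader` wrapping follows the recommendation in the Watch doc comment.

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Wrap the context with WithRequireLeader so the watch cannot get stuck on a
	// partitioned member, and always cancel it to release watcher resources.
	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel()

	for wresp := range cli.Watch(ctx, "foo", clientv3.WithPrefix()) {
		if err := wresp.Err(); err != nil {
			fmt.Println("watch closed:", err)
			return
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```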
diff --git a/client/v3/watch_test.go b/client/v3/watch_test.go
deleted file mode 100644
index 0a94f08cd56..00000000000
--- a/client/v3/watch_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-)
-
-func TestEvent(t *testing.T) {
- tests := []struct {
- ev *Event
- isCreate bool
- isModify bool
- }{{
- ev: &Event{
- Type: EventTypePut,
- Kv: &mvccpb.KeyValue{
- CreateRevision: 3,
- ModRevision: 3,
- },
- },
- isCreate: true,
- }, {
- ev: &Event{
- Type: EventTypePut,
- Kv: &mvccpb.KeyValue{
- CreateRevision: 3,
- ModRevision: 4,
- },
- },
- isModify: true,
- }}
- for i, tt := range tests {
- if tt.isCreate && !tt.ev.IsCreate() {
- t.Errorf("#%d: event should be Create event", i)
- }
- if tt.isModify && !tt.ev.IsModify() {
- t.Errorf("#%d: event should be Modify event", i)
- }
- }
-}
-
-func TestShouldRetryWatch(t *testing.T) {
- testCases := []struct {
- name string
- msg string
- expectedRetry bool
- }{
- {
- name: "equal to ErrGRPCInvalidAuthToken",
- msg: rpctypes.ErrGRPCInvalidAuthToken.Error(),
- expectedRetry: true,
- },
- {
- name: "equal to ErrGRPCAuthOldRevision",
- msg: rpctypes.ErrGRPCAuthOldRevision.Error(),
- expectedRetry: true,
- },
- {
- name: "valid grpc error but not equal to ErrGRPCInvalidAuthToken or ErrGRPCAuthOldRevision",
- msg: rpctypes.ErrGRPCUserEmpty.Error(),
- expectedRetry: false,
- },
- {
- name: "invalid grpc error and not equal to ErrGRPCInvalidAuthToken or ErrGRPCAuthOldRevision",
- msg: "whatever error message",
- expectedRetry: false,
- },
- }
-
- for _, tc := range testCases {
- tc := tc
- t.Run(tc.name, func(t *testing.T) {
- assert.Equal(t, tc.expectedRetry, shouldRetryWatch(tc.msg))
- })
- }
-}
diff --git a/client/v3/yaml/config.go b/client/v3/yaml/config.go
deleted file mode 100644
index 99d07236433..00000000000
--- a/client/v3/yaml/config.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package yaml handles yaml-formatted clientv3 configuration data.
-package yaml
-
-import (
- "crypto/tls"
- "crypto/x509"
- "os"
-
- "sigs.k8s.io/yaml"
-
- "go.etcd.io/etcd/client/pkg/v3/tlsutil"
- clientv3 "go.etcd.io/etcd/client/v3"
-)
-
-type yamlConfig struct {
- clientv3.Config
-
- InsecureTransport bool `json:"insecure-transport"`
- InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
- Certfile string `json:"cert-file"`
- Keyfile string `json:"key-file"`
- TrustedCAfile string `json:"trusted-ca-file"`
-
- // CAfile is being deprecated. Use 'TrustedCAfile' instead.
- // TODO: deprecate this in v4
- CAfile string `json:"ca-file"`
-}
-
-// NewConfig creates a new clientv3.Config from a yaml file.
-func NewConfig(fpath string) (*clientv3.Config, error) {
- b, err := os.ReadFile(fpath)
- if err != nil {
- return nil, err
- }
-
- yc := &yamlConfig{}
-
- err = yaml.Unmarshal(b, yc)
- if err != nil {
- return nil, err
- }
-
- if yc.InsecureTransport {
- return &yc.Config, nil
- }
-
- var (
- cert *tls.Certificate
- cp *x509.CertPool
- )
-
- if yc.Certfile != "" && yc.Keyfile != "" {
- cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
- if err != nil {
- return nil, err
- }
- }
-
- if yc.TrustedCAfile != "" {
- cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile})
- if err != nil {
- return nil, err
- }
- }
-
- tlscfg := &tls.Config{
- MinVersion: tls.VersionTLS12,
- InsecureSkipVerify: yc.InsecureSkipTLSVerify,
- RootCAs: cp,
- }
- if cert != nil {
- tlscfg.Certificates = []tls.Certificate{*cert}
- }
- yc.Config.TLS = tlscfg
-
- return &yc.Config, nil
-}
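
For context, a short sketch of how the removed yaml helper was typically used. The file name "etcd-client.yaml" is an illustrative assumption; the file would carry the insecure-transport / cert-file / key-file / trusted-ca-file keys defined above.

```go
package main

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	yamlcfg "go.etcd.io/etcd/client/v3/yaml"
)

func main() {
	// Load endpoints and TLS settings from the YAML file.
	cfg, err := yamlcfg.NewConfig("etcd-client.yaml")
	if err != nil {
		log.Fatal(err)
	}
	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```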
diff --git a/client/v3/yaml/config_test.go b/client/v3/yaml/config_test.go
deleted file mode 100644
index ec8441b1b63..00000000000
--- a/client/v3/yaml/config_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package yaml
-
-import (
- "log"
- "os"
- "reflect"
- "testing"
-
- "sigs.k8s.io/yaml"
-)
-
-var (
- certPath = "../../../tests/fixtures/server.crt"
- privateKeyPath = "../../../tests/fixtures/server.key.insecure"
- caPath = "../../../tests/fixtures/ca.crt"
-)
-
-func TestConfigFromFile(t *testing.T) {
- tests := []struct {
- ym *yamlConfig
-
- werr bool
- }{
- {
- &yamlConfig{},
- false,
- },
- {
- &yamlConfig{
- InsecureTransport: true,
- },
- false,
- },
- {
- &yamlConfig{
- Keyfile: privateKeyPath,
- Certfile: certPath,
- TrustedCAfile: caPath,
- InsecureSkipTLSVerify: true,
- },
- false,
- },
- {
- &yamlConfig{
- Keyfile: "bad",
- Certfile: "bad",
- },
- true,
- },
- {
- &yamlConfig{
- Keyfile: privateKeyPath,
- Certfile: certPath,
- TrustedCAfile: "bad",
- },
- true,
- },
- }
-
- for i, tt := range tests {
- tmpfile, err := os.CreateTemp("", "clientcfg")
- if err != nil {
- log.Fatal(err)
- }
-
- b, err := yaml.Marshal(tt.ym)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = tmpfile.Write(b)
- if err != nil {
- t.Fatal(err)
- }
- err = tmpfile.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- cfg, cerr := NewConfig(tmpfile.Name())
- if cerr != nil && !tt.werr {
- t.Errorf("#%d: err = %v, want %v", i, cerr, tt.werr)
- continue
- }
- if cerr != nil {
- os.Remove(tmpfile.Name())
- continue
- }
-
- if !reflect.DeepEqual(cfg.Endpoints, tt.ym.Endpoints) {
- t.Errorf("#%d: endpoint = %v, want %v", i, cfg.Endpoints, tt.ym.Endpoints)
- }
-
- if tt.ym.InsecureTransport != (cfg.TLS == nil) {
- t.Errorf("#%d: insecureTransport = %v, want %v", i, cfg.TLS == nil, tt.ym.InsecureTransport)
- }
-
- if !tt.ym.InsecureTransport {
- if tt.ym.Certfile != "" && len(cfg.TLS.Certificates) == 0 {
- t.Errorf("#%d: failed to load in cert", i)
- }
- if tt.ym.TrustedCAfile != "" && cfg.TLS.RootCAs == nil {
- t.Errorf("#%d: failed to load in ca cert", i)
- }
- if cfg.TLS.InsecureSkipVerify != tt.ym.InsecureSkipTLSVerify {
- t.Errorf("#%d: skipTLSVeify = %v, want %v", i, cfg.TLS.InsecureSkipVerify, tt.ym.InsecureSkipTLSVerify)
- }
- }
-
- os.Remove(tmpfile.Name())
- }
-}
diff --git a/client/pkg/fileutil/dir_unix.go b/client_sdk/pkg/fileutil/dir_unix.go
similarity index 95%
rename from client/pkg/fileutil/dir_unix.go
rename to client_sdk/pkg/fileutil/dir_unix.go
index add54c6315d..8c81de30bb0 100644
--- a/client/pkg/fileutil/dir_unix.go
+++ b/client_sdk/pkg/fileutil/dir_unix.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !windows
+// +build !windows
package fileutil
@@ -20,7 +21,7 @@ import "os"
const (
// PrivateDirMode grants owner to make/remove files inside the directory.
- PrivateDirMode = 0700
+ PrivateDirMode = 0o700
)
// OpenDir opens a directory for syncing.
diff --git a/client/pkg/fileutil/dir_windows.go b/client_sdk/pkg/fileutil/dir_windows.go
similarity index 97%
rename from client/pkg/fileutil/dir_windows.go
rename to client_sdk/pkg/fileutil/dir_windows.go
index fd3415d5944..3414e96fcac 100644
--- a/client/pkg/fileutil/dir_windows.go
+++ b/client_sdk/pkg/fileutil/dir_windows.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build windows
+// +build windows
package fileutil
@@ -23,7 +24,7 @@ import (
const (
// PrivateDirMode grants owner to make/remove files inside the directory.
- PrivateDirMode = 0777
+ PrivateDirMode = 0o777
)
// OpenDir opens a directory in windows with write access for syncing.
diff --git a/client/pkg/fileutil/doc.go b/client_sdk/pkg/fileutil/doc.go
similarity index 100%
rename from client/pkg/fileutil/doc.go
rename to client_sdk/pkg/fileutil/doc.go
diff --git a/client_sdk/pkg/fileutil/fileutil.go b/client_sdk/pkg/fileutil/fileutil.go
new file mode 100644
index 00000000000..4bac9fb596f
--- /dev/null
+++ b/client_sdk/pkg/fileutil/fileutil.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "go.uber.org/zap"
+)
+
+const (
+ // PrivateFileMode grants the owner permission to read and write the file.
+ PrivateFileMode = 0o600
+)
+
+// IsDirWriteable checks if dir is writable by writing and removing a file
+// to dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+ f, err := filepath.Abs(filepath.Join(dir, ".touch"))
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+ return err
+ }
+ return os.Remove(f)
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any of them
+// do not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(dir string) error {
+ // If the path is already a directory, MkdirAll does nothing and returns nil, so first check whether dir exists with the expected permission mode.
+ if Exist(dir) {
+ err := CheckDirPermission(dir, PrivateDirMode)
+ if err != nil {
+ lg, _ := zap.NewProduction()
+ if lg == nil {
+ lg = zap.NewExample()
+ }
+ lg.Warn("check file permission", zap.Error(err))
+ }
+ } else {
+ err := os.MkdirAll(dir, PrivateDirMode)
+ if err != nil {
+ // if mkdirAll("a/text") and "text" is not
+ // a directory, this will return syscall.ENOTDIR
+ return err
+ }
+ }
+
+ return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns error
+// if the deepest directory was not empty.
+func CreateDirAll(dir string) error {
+ err := TouchDirAll(dir)
+ if err == nil {
+ var ns []string
+ ns, err = ReadDir(dir)
+ if err != nil {
+ return err
+ }
+ if len(ns) != 0 {
+ err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
+ }
+ }
+ return err
+}
+
+// Exist reports whether the file or directory exists.
+func Exist(name string) bool {
+ _, err := os.Stat(name)
+ return err == nil
+}
+
+// DirEmpty reports whether name exists and is an empty directory.
+func DirEmpty(name string) bool {
+ ns, err := ReadDir(name)
+ return len(ns) == 0 && err == nil
+}
+
+// ZeroToEnd zeroes the file from the current offset to the end while keeping the file space preallocated.
+func ZeroToEnd(f *os.File) error {
+ // The offset is zero-based and may exceed the current file length; the extra range is filled with zeros.
+ off, err := f.Seek(0, io.SeekCurrent) // current offset relative to the start of the file
+ if err != nil {
+ return err
+ }
+ lenf, lerr := f.Seek(0, io.SeekEnd) // file size
+ if lerr != nil {
+ return lerr
+ }
+ // Truncate drops everything after off; regardless of where the current offset is, it only changes the file size and leaves the offset untouched.
+ if err = f.Truncate(off); err != nil {
+ return err
+ }
+ if err = Preallocate(f, lenf, true); err != nil {
+ return err
+ } // preallocate the file space
+ _, err = f.Seek(off, io.SeekStart) // seek back to where writing should continue
+ return err
+}
+
+// CheckDirPermission checks permission on an existing dir.
+// Returns error if dir is empty or exist with a different permission than specified.
+func CheckDirPermission(dir string, perm os.FileMode) error {
+ if !Exist(dir) {
+ return fmt.Errorf("directory %q empty, cannot check permission", dir)
+ }
+ // check the existing permission on the directory
+ dirInfo, err := os.Stat(dir)
+ if err != nil {
+ return err
+ }
+ dirMode := dirInfo.Mode().Perm()
+ if dirMode != perm {
+ err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode))
+ return err
+ }
+ return nil
+}
+
+// RemoveMatchFile removes the files in dir whose names match the given function.
+func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) bool) error {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if !Exist(dir) {
+ return fmt.Errorf("目录不存在 %s", dir)
+ }
+ fileNames, err := ReadDir(dir)
+ if err != nil {
+ return err
+ }
+ var removeFailedFiles []string
+ for _, fileName := range fileNames {
+ if matchFunc(fileName) {
+ file := filepath.Join(dir, fileName)
+ if err = os.Remove(file); err != nil {
+ removeFailedFiles = append(removeFailedFiles, fileName)
+ lg.Error("删除文件失败",
+ zap.String("file", file),
+ zap.Error(err))
+ continue
+ }
+ }
+ }
+ if len(removeFailedFiles) != 0 {
+ return fmt.Errorf("删除文件(s) %v error", removeFailedFiles)
+ }
+ return nil
+}
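
A minimal sketch of how the helpers added above are typically combined when preparing a data directory; the directory path is an illustrative assumption, and the import path mirrors the renamed `client_sdk` packages used elsewhere in this change.

```go
package main

import (
	"log"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
)

func main() {
	dir := "default.etcd/member" // illustrative data directory

	// Creates the directory with 0700 if missing and verifies it is writable.
	if err := fileutil.TouchDirAll(dir); err != nil {
		log.Fatalf("cannot prepare data directory: %v", err)
	}

	// Warns when the directory is more permissive than the recommended mode.
	if err := fileutil.CheckDirPermission(dir, fileutil.PrivateDirMode); err != nil {
		log.Printf("permission check: %v", err)
	}
}
```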
diff --git a/client_sdk/pkg/fileutil/lock.go b/client_sdk/pkg/fileutil/lock.go
new file mode 100644
index 00000000000..c6a026028e5
--- /dev/null
+++ b/client_sdk/pkg/fileutil/lock.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+)
+
+var ErrLocked = errors.New("fileutil: file already locked")
+
+type LockedFile struct {
+ *os.File // underlying file handle
+}
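
A short sketch of the intended locking pattern built on `ErrLocked` and `LockedFile` above, using the platform-specific `TryLockFile` implementations that follow; the lock-file path is an illustrative assumption.

```go
package main

import (
	"log"
	"os"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
)

func main() {
	// "default.etcd/member/LOCK" is an illustrative lock-file path.
	l, err := fileutil.TryLockFile("default.etcd/member/LOCK", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err == fileutil.ErrLocked {
		log.Fatal("data directory is locked by another process")
	}
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close() // releases the flock / LockFileEx lock
}
```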
diff --git a/client/pkg/fileutil/lock_flock.go b/client_sdk/pkg/fileutil/lock_flock.go
similarity index 88%
rename from client/pkg/fileutil/lock_flock.go
rename to client_sdk/pkg/fileutil/lock_flock.go
index a4e5707a659..0133460e49e 100644
--- a/client/pkg/fileutil/lock_flock.go
+++ b/client_sdk/pkg/fileutil/lock_flock.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !windows && !plan9 && !solaris
+// +build !windows,!plan9,!solaris
package fileutil
@@ -21,6 +22,8 @@ import (
"syscall"
)
+// Tries to place an exclusive lock on the file; with LOCK_NB set, flock returns EWOULDBLOCK if the file is already locked.
+
func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
@@ -36,6 +39,7 @@ func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, err
return &LockedFile{f}, nil
}
+// Acquires the file lock, blocking until it becomes available.
func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := os.OpenFile(path, flag, perm)
if err != nil {
diff --git a/client/pkg/fileutil/lock_linux.go b/client_sdk/pkg/fileutil/lock_linux.go
similarity index 96%
rename from client/pkg/fileutil/lock_linux.go
rename to client_sdk/pkg/fileutil/lock_linux.go
index c33a2f4afc7..1183d8a1996 100644
--- a/client/pkg/fileutil/lock_linux.go
+++ b/client_sdk/pkg/fileutil/lock_linux.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build linux
+// +build linux
package fileutil
@@ -39,7 +40,7 @@ var (
}
linuxTryLockFile = flockTryLockFile
- linuxLockFile = flockLockFile
+ linuxLockFile = flockLockFile // file lock
)
func init() {
@@ -72,6 +73,7 @@ func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error
return &LockedFile{f}, nil
}
+// LockFile acquires an exclusive lock on path, blocking until it succeeds.
func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return linuxLockFile(path, flag, perm)
}
diff --git a/client/pkg/fileutil/lock_plan9.go b/client_sdk/pkg/fileutil/lock_plan9.go
similarity index 100%
rename from client/pkg/fileutil/lock_plan9.go
rename to client_sdk/pkg/fileutil/lock_plan9.go
diff --git a/client/pkg/fileutil/lock_solaris.go b/client_sdk/pkg/fileutil/lock_solaris.go
similarity index 98%
rename from client/pkg/fileutil/lock_solaris.go
rename to client_sdk/pkg/fileutil/lock_solaris.go
index 2e892fecc65..683cc1db9c4 100644
--- a/client/pkg/fileutil/lock_solaris.go
+++ b/client_sdk/pkg/fileutil/lock_solaris.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build solaris
+// +build solaris
package fileutil
diff --git a/client/pkg/fileutil/lock_unix.go b/client_sdk/pkg/fileutil/lock_unix.go
similarity index 95%
rename from client/pkg/fileutil/lock_unix.go
rename to client_sdk/pkg/fileutil/lock_unix.go
index 05db5367410..d89027e1fad 100644
--- a/client/pkg/fileutil/lock_unix.go
+++ b/client_sdk/pkg/fileutil/lock_unix.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !windows && !plan9 && !solaris && !linux
+// +build !windows,!plan9,!solaris,!linux
package fileutil
diff --git a/client_sdk/pkg/fileutil/lock_windows.go b/client_sdk/pkg/fileutil/lock_windows.go
new file mode 100644
index 00000000000..5cbf2bc3d5e
--- /dev/null
+++ b/client_sdk/pkg/fileutil/lock_windows.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+// +build windows
+
+package fileutil
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procLockFileEx = modkernel32.NewProc("LockFileEx")
+
+ errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file")
+)
+
+const (
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+ LOCKFILE_EXCLUSIVE_LOCK = 2
+ LOCKFILE_FAIL_IMMEDIATELY = 1
+
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+ errLockViolation syscall.Errno = 0x21
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := open(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := open(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func open(path string, flag int, perm os.FileMode) (*os.File, error) {
+ if path == "" {
+ return nil, fmt.Errorf("cannot open empty filename")
+ }
+ var access uint32
+ switch flag {
+ case syscall.O_RDONLY:
+ access = syscall.GENERIC_READ
+ case syscall.O_WRONLY:
+ access = syscall.GENERIC_WRITE
+ case syscall.O_RDWR:
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ case syscall.O_WRONLY | syscall.O_CREAT:
+ access = syscall.GENERIC_ALL
+ default:
+ panic(fmt.Errorf("flag %v is not supported", flag))
+ }
+ fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
+ access,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil,
+ syscall.OPEN_ALWAYS,
+ syscall.FILE_ATTRIBUTE_NORMAL,
+ 0)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), nil
+}
+
+func lockFile(fd syscall.Handle, flags uint32) error {
+ var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
+ flag |= flags
+ if fd == syscall.InvalidHandle {
+ return nil
+ }
+ err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
+ if err == nil {
+ return nil
+ } else if err.Error() == errLocked.Error() {
+ return ErrLocked
+ } else if err != errLockViolation {
+ return err
+ }
+ return nil
+}
+
+func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ var reserved uint32 = 0
+ r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return err
+}
diff --git a/client_sdk/pkg/fileutil/over_preallocate.go b/client_sdk/pkg/fileutil/over_preallocate.go
new file mode 100644
index 00000000000..a9fac258923
--- /dev/null
+++ b/client_sdk/pkg/fileutil/over_preallocate.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "io"
+ "os"
+)
+
+// Preallocate preallocates file space up to sizeInBytes.
+func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
+ if sizeInBytes == 0 {
+ return nil
+ }
+ if extendFile {
+ return preallocExtend(f, sizeInBytes)
+ }
+ return preallocFixed(f, sizeInBytes)
+}
+
+// Trims any excess space; used as a fallback when fallocate is unavailable.
+func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
+ curOff, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ size, err := f.Seek(sizeInBytes, io.SeekEnd)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Seek(curOff, io.SeekStart); err != nil {
+ return err
+ }
+ if sizeInBytes > size {
+ return nil
+ }
+ return f.Truncate(sizeInBytes)
+}
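
A hedged sketch of how `Preallocate` is typically called when creating an append-only file such as a WAL segment; the file name and the 64 MiB size are illustrative assumptions.

```go
package main

import (
	"log"
	"os"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
)

func main() {
	f, err := os.OpenFile("0.tmp", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// extendFile=true grows the visible file size while reserving the space;
	// false only reserves blocks and keeps the reported size unchanged.
	if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
		log.Fatal(err)
	}
}
```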
diff --git a/client_sdk/pkg/fileutil/over_preallocate_unix.go b/client_sdk/pkg/fileutil/over_preallocate_unix.go
new file mode 100644
index 00000000000..4389252d186
--- /dev/null
+++ b/client_sdk/pkg/fileutil/over_preallocate_unix.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+ err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
+ if err != nil {
+ errno, ok := err.(syscall.Errno)
+ // not supported; fallback
+ // fallocate EINTRs frequently in some environments; fallback
+ if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
+ return preallocExtendTrunc(f, sizeInBytes)
+ }
+ }
+ return err
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+ // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
+ err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
+ if err != nil {
+ errno, ok := err.(syscall.Errno)
+ // treat not supported as nil error
+ if ok && errno == syscall.ENOTSUP {
+ return nil
+ }
+ }
+ return err
+}
diff --git a/client/pkg/fileutil/preallocate_darwin.go b/client_sdk/pkg/fileutil/preallocate_darwin.go
similarity index 99%
rename from client/pkg/fileutil/preallocate_darwin.go
rename to client_sdk/pkg/fileutil/preallocate_darwin.go
index e74968d0351..caab143dd30 100644
--- a/client/pkg/fileutil/preallocate_darwin.go
+++ b/client_sdk/pkg/fileutil/preallocate_darwin.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build darwin
+// +build darwin
package fileutil
diff --git a/client/pkg/fileutil/preallocate_unsupported.go b/client_sdk/pkg/fileutil/preallocate_unsupported.go
similarity index 97%
rename from client/pkg/fileutil/preallocate_unsupported.go
rename to client_sdk/pkg/fileutil/preallocate_unsupported.go
index e7fd937a436..2c46dd49075 100644
--- a/client/pkg/fileutil/preallocate_unsupported.go
+++ b/client_sdk/pkg/fileutil/preallocate_unsupported.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !linux && !darwin
+// +build !linux,!darwin
package fileutil
diff --git a/client/pkg/fileutil/purge.go b/client_sdk/pkg/fileutil/purge.go
similarity index 86%
rename from client/pkg/fileutil/purge.go
rename to client_sdk/pkg/fileutil/purge.go
index f4492009d6c..e8ac0ca6f58 100644
--- a/client/pkg/fileutil/purge.go
+++ b/client_sdk/pkg/fileutil/purge.go
@@ -41,12 +41,6 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval
lg = zap.NewNop()
}
errC := make(chan error, 1)
- lg.Info("started to purge file",
- zap.String("dir", dirname),
- zap.String("suffix", suffix),
- zap.Uint("max", max),
- zap.Duration("interval", interval))
-
go func() {
if donec != nil {
defer close(donec)
@@ -69,16 +63,14 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval
f := filepath.Join(dirname, newfnames[0])
l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
if err != nil {
- lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err))
break
}
if err = os.Remove(f); err != nil {
- lg.Error("failed to remove file", zap.String("path", f), zap.Error(err))
errC <- err
return
}
if err = l.Close(); err != nil {
- lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
+ lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
errC <- err
return
}
diff --git a/client/pkg/fileutil/read_dir.go b/client_sdk/pkg/fileutil/read_dir.go
similarity index 95%
rename from client/pkg/fileutil/read_dir.go
rename to client_sdk/pkg/fileutil/read_dir.go
index 2eeaa89bc04..e1b93b8ecc8 100644
--- a/client/pkg/fileutil/read_dir.go
+++ b/client_sdk/pkg/fileutil/read_dir.go
@@ -40,7 +40,7 @@ func (op *ReadDirOp) applyOpts(opts []ReadDirOption) {
}
}
-// ReadDir returns the filenames in the given directory in sorted order.
+// ReadDir returns all filenames in the given directory, sorted.
func ReadDir(d string, opts ...ReadDirOption) ([]string, error) {
op := &ReadDirOp{}
op.applyOpts(opts)
diff --git a/client/pkg/fileutil/sync.go b/client_sdk/pkg/fileutil/sync.go
similarity index 97%
rename from client/pkg/fileutil/sync.go
rename to client_sdk/pkg/fileutil/sync.go
index 670d01fadcc..0a0855309e9 100644
--- a/client/pkg/fileutil/sync.go
+++ b/client_sdk/pkg/fileutil/sync.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !linux && !darwin
+// +build !linux,!darwin
package fileutil
diff --git a/client/pkg/fileutil/sync_darwin.go b/client_sdk/pkg/fileutil/sync_darwin.go
similarity index 98%
rename from client/pkg/fileutil/sync_darwin.go
rename to client_sdk/pkg/fileutil/sync_darwin.go
index 7affa78ea64..1923b276ea0 100644
--- a/client/pkg/fileutil/sync_darwin.go
+++ b/client_sdk/pkg/fileutil/sync_darwin.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build darwin
+// +build darwin
package fileutil
diff --git a/client/pkg/fileutil/sync_linux.go b/client_sdk/pkg/fileutil/sync_linux.go
similarity index 98%
rename from client/pkg/fileutil/sync_linux.go
rename to client_sdk/pkg/fileutil/sync_linux.go
index a3172382e5a..b9398c23f94 100644
--- a/client/pkg/fileutil/sync_linux.go
+++ b/client_sdk/pkg/fileutil/sync_linux.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build linux
+// +build linux
package fileutil
diff --git a/client/pkg/logutil/doc.go b/client_sdk/pkg/logutil/doc.go
similarity index 100%
rename from client/pkg/logutil/doc.go
rename to client_sdk/pkg/logutil/doc.go
diff --git a/client/pkg/logutil/log_level.go b/client_sdk/pkg/logutil/log_level.go
similarity index 92%
rename from client/pkg/logutil/log_level.go
rename to client_sdk/pkg/logutil/log_level.go
index 6c95bcfe9f7..57cd09cad84 100644
--- a/client/pkg/logutil/log_level.go
+++ b/client_sdk/pkg/logutil/log_level.go
@@ -20,7 +20,7 @@ import (
var DefaultLogLevel = "info"
-// ConvertToZapLevel converts log level string to zapcore.Level.
+// ConvertToZapLevel converts a log level string to zapcore.Level.
func ConvertToZapLevel(lvl string) zapcore.Level {
var level zapcore.Level
if err := level.Set(lvl); err != nil {
diff --git a/client/pkg/logutil/zap.go b/client_sdk/pkg/logutil/zap.go
similarity index 79%
rename from client/pkg/logutil/zap.go
rename to client_sdk/pkg/logutil/zap.go
index 0a4374c77b8..15aa56a76d0 100644
--- a/client/pkg/logutil/zap.go
+++ b/client_sdk/pkg/logutil/zap.go
@@ -21,18 +21,7 @@ import (
"go.uber.org/zap/zapcore"
)
-// CreateDefaultZapLogger creates a logger with default zap configuration
-func CreateDefaultZapLogger(level zapcore.Level) (*zap.Logger, error) {
- lcfg := DefaultZapLoggerConfig
- lcfg.Level = zap.NewAtomicLevelAt(level)
- c, err := lcfg.Build()
- if err != nil {
- return nil, err
- }
- return c, nil
-}
-
-// DefaultZapLoggerConfig defines default zap logger configuration.
+// DefaultZapLoggerConfig defines the default zap logger configuration.
var DefaultZapLoggerConfig = zap.Config{
Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
@@ -42,9 +31,9 @@ var DefaultZapLoggerConfig = zap.Config{
Thereafter: 100,
},
- Encoding: DefaultLogFormat,
+ Encoding: "json",
- // copied from "zap.NewProductionEncoderConfig" with some updates
+ // Copied from "zap.NewProductionEncoderConfig" with some updates.
EncoderConfig: zapcore.EncoderConfig{
TimeKey: "ts",
LevelKey: "level",
@@ -59,20 +48,21 @@ var DefaultZapLoggerConfig = zap.Config{
EncodeCaller: zapcore.ShortCallerEncoder,
},
- // Use "/dev/null" to discard all
+ // Use "/dev/null" 弃用所有
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
}
-// MergeOutputPaths merges logging output paths, resolving conflicts.
+// MergeOutputPaths merges logging output paths and resolves conflicts; if "/dev/null" is present, all other outputs are discarded.
func MergeOutputPaths(cfg zap.Config) zap.Config {
+ _ = zap.NewProductionEncoderConfig
outputs := make(map[string]struct{})
for _, v := range cfg.OutputPaths {
outputs[v] = struct{}{}
}
outputSlice := make([]string, 0)
if _, ok := outputs["/dev/null"]; ok {
- // "/dev/null" to discard all
+ // "/dev/null" 丢弃所有
outputSlice = []string{"/dev/null"}
} else {
for k := range outputs {
diff --git a/client/pkg/logutil/zap_journal.go b/client_sdk/pkg/logutil/zap_journal.go
similarity index 97%
rename from client/pkg/logutil/zap_journal.go
rename to client_sdk/pkg/logutil/zap_journal.go
index c6adc010381..5bc195a424c 100644
--- a/client/pkg/logutil/zap_journal.go
+++ b/client_sdk/pkg/logutil/zap_journal.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !windows
+// +build !windows
package logutil
@@ -24,7 +25,7 @@ import (
"os"
"path/filepath"
- "go.etcd.io/etcd/client/pkg/v3/systemd"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/systemd"
"github.com/coreos/go-systemd/v22/journal"
"go.uber.org/zap/zapcore"
diff --git a/client_sdk/pkg/pathutil/path.go b/client_sdk/pkg/pathutil/path.go
new file mode 100644
index 00000000000..bbbde2a1eb4
--- /dev/null
+++ b/client_sdk/pkg/pathutil/path.go
@@ -0,0 +1,31 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pathutil implements utility functions for handling slash-separated
+// paths.
+package pathutil
+
+import "path"
+
+// CanonicalURLPath returns the canonical url path for p, which follows the rules:
+// 1. the path always starts with "/"
+// 2. replace multiple slashes with a single slash
+// 3. replace each '.' '..' path name element with equivalent one
+// 4. keep the trailing slash
+// The function is borrowed from stdlib http.cleanPath in etcd.go.
+func CanonicalURLPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root,
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+ return np
+}
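A minimal sketch of how CanonicalURLPath behaves for a few inputs; the main-package wrapper and the `client_sdk` import path are assumptions following the rename in this patch:

```go
package main

import (
	"fmt"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/pathutil"
)

func main() {
	// A leading slash is added, duplicate slashes and dot elements are cleaned,
	// and a trailing slash is preserved.
	fmt.Println(pathutil.CanonicalURLPath(""))           // "/"
	fmt.Println(pathutil.CanonicalURLPath("a//b/../c/")) // "/a/c/"
	fmt.Println(pathutil.CanonicalURLPath("//x/./y"))    // "/x/y"
}
```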
diff --git a/client/pkg/srv/srv.go b/client_sdk/pkg/srv/srv.go
similarity index 96%
rename from client/pkg/srv/srv.go
rename to client_sdk/pkg/srv/srv.go
index 15fda134d6a..97335361095 100644
--- a/client/pkg/srv/srv.go
+++ b/client_sdk/pkg/srv/srv.go
@@ -21,7 +21,7 @@ import (
"net/url"
"strings"
- "go.etcd.io/etcd/client/pkg/v3/types"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
)
var (
@@ -33,6 +33,7 @@ var (
// GetCluster gets the cluster information via DNS discovery.
// Also sees each entry as a separate instance.
func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) {
+ tempName := int(0)
tcp2ap := make(map[string]url.URL)
// First, resolve the apurls
@@ -44,10 +45,7 @@ func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]
tcp2ap[tcpAddr.String()] = url
}
- var (
- tempName int
- stringParts []string
- )
+ stringParts := []string{}
updateNodeMap := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", dns)
if err != nil {
@@ -99,10 +97,8 @@ type SRVClients struct {
// GetClient looks up the client endpoints for a service and domain.
func GetClient(service, domain string, serviceName string) (*SRVClients, error) {
- var (
- urls []*url.URL
- srvs []*net.SRV
- )
+ var urls []*url.URL
+ var srvs []*net.SRV
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
diff --git a/client/pkg/systemd/doc.go b/client_sdk/pkg/systemd/doc.go
similarity index 100%
rename from client/pkg/systemd/doc.go
rename to client_sdk/pkg/systemd/doc.go
diff --git a/client/pkg/systemd/journal.go b/client_sdk/pkg/systemd/journal.go
similarity index 100%
rename from client/pkg/systemd/journal.go
rename to client_sdk/pkg/systemd/journal.go
diff --git a/client_sdk/pkg/testutil/assert.go b/client_sdk/pkg/testutil/assert.go
new file mode 100644
index 00000000000..e8e042021e9
--- /dev/null
+++ b/client_sdk/pkg/testutil/assert.go
@@ -0,0 +1,67 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testutil
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func AssertEqual(t *testing.T, e, a interface{}, msg ...string) {
+ t.Helper()
+ if (e == nil || a == nil) && (isNil(e) && isNil(a)) {
+ return
+ }
+ if reflect.DeepEqual(e, a) {
+ return
+ }
+ s := ""
+ if len(msg) > 1 {
+ s = msg[0] + ": "
+ }
+ s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a)
+ FatalStack(t, s)
+}
+
+func AssertNil(t *testing.T, v interface{}) {
+ t.Helper()
+ AssertEqual(t, nil, v)
+}
+
+func AssertNotNil(t *testing.T, v interface{}) {
+ t.Helper()
+ if v == nil {
+ t.Fatalf("expected non-nil, got %+v", v)
+ }
+}
+
+func AssertTrue(t *testing.T, v bool, msg ...string) {
+ t.Helper()
+ AssertEqual(t, true, v, msg...)
+}
+
+func AssertFalse(t *testing.T, v bool, msg ...string) {
+ t.Helper()
+ AssertEqual(t, false, v, msg...)
+}
+
+func isNil(v interface{}) bool {
+ if v == nil {
+ return true
+ }
+ rv := reflect.ValueOf(v)
+ return rv.Kind() != reflect.Struct && rv.IsNil()
+}
diff --git a/client/pkg/testutil/leak.go b/client_sdk/pkg/testutil/leak.go
similarity index 93%
rename from client/pkg/testutil/leak.go
rename to client_sdk/pkg/testutil/leak.go
index 8c08fbd5123..b2b9bdda757 100644
--- a/client/pkg/testutil/leak.go
+++ b/client_sdk/pkg/testutil/leak.go
@@ -23,7 +23,7 @@ CheckLeakedGoroutine verifies tests do not leave any leaky
goroutines. It returns true when there are goroutines still
running(leaking) after all tests.
- import "go.etcd.io/etcd/client/pkg/v3/testutil"
+ import "github.com/ls-2018/etcd_cn/client_sdk/pkg/testutil"
func TestMain(m *testing.M) {
testutil.MustTestMainWithLeakDetection(m)
@@ -48,7 +48,7 @@ func CheckLeakedGoroutine() bool {
stackCount[normalized]++
}
- fmt.Fprint(os.Stderr, "Unexpected goroutines running after all test(s).\n")
+ fmt.Fprintf(os.Stderr, "Unexpected goroutines running after all test(s).\n")
for stack, count := range stackCount {
fmt.Fprintf(os.Stderr, "%d instances of:\n%s\n", count, stack)
}
@@ -140,8 +140,8 @@ func interestingGoroutines() (gs []string) {
strings.Contains(stack, "created by testing.(*T).Run") ||
strings.Contains(stack, "testing.Main(") ||
strings.Contains(stack, "runtime.goexit") ||
- strings.Contains(stack, "go.etcd.io/etcd/client/pkg/v3/testutil.interestingGoroutines") ||
- strings.Contains(stack, "go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop") ||
+ strings.Contains(stack, "github.com/ls-2018/etcd_cn/client_sdk/pkg/testutil.interestingGoroutines") ||
+ strings.Contains(stack, "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil.(*MergeLogger).outputLoop") ||
strings.Contains(stack, "github.com/golang/glog.(*loggingT).flushDaemon") ||
strings.Contains(stack, "created by runtime.gc") ||
strings.Contains(stack, "created by text/template/parse.lex") ||
diff --git a/client/pkg/testutil/pauseable_handler.go b/client_sdk/pkg/testutil/pauseable_handler.go
similarity index 100%
rename from client/pkg/testutil/pauseable_handler.go
rename to client_sdk/pkg/testutil/pauseable_handler.go
diff --git a/client/pkg/testutil/recorder.go b/client_sdk/pkg/testutil/recorder.go
similarity index 97%
rename from client/pkg/testutil/recorder.go
rename to client_sdk/pkg/testutil/recorder.go
index 064e7313875..41349fec52d 100644
--- a/client/pkg/testutil/recorder.go
+++ b/client_sdk/pkg/testutil/recorder.go
@@ -87,7 +87,7 @@ type recorderStream struct {
}
func NewRecorderStream() Recorder {
- return NewRecorderStreamWithWaitTimout(5 * time.Second)
+ return NewRecorderStreamWithWaitTimout(time.Duration(5 * time.Second))
}
func NewRecorderStreamWithWaitTimout(waitTimeout time.Duration) Recorder {
diff --git a/client/pkg/testutil/testingtb.go b/client_sdk/pkg/testutil/testingtb.go
similarity index 98%
rename from client/pkg/testutil/testingtb.go
rename to client_sdk/pkg/testutil/testingtb.go
index bafaccf9846..970542c0405 100644
--- a/client/pkg/testutil/testingtb.go
+++ b/client_sdk/pkg/testutil/testingtb.go
@@ -15,6 +15,7 @@
package testutil
import (
+ "io/ioutil"
"log"
"os"
)
@@ -111,7 +112,7 @@ func (t *testingTBProthesis) Name() string {
}
func (t *testingTBProthesis) TempDir() string {
- dir, err := os.MkdirTemp("", t.name)
+ dir, err := ioutil.TempDir("", t.name)
if err != nil {
t.Fatal(err)
}
diff --git a/client/pkg/testutil/testutil.go b/client_sdk/pkg/testutil/testutil.go
similarity index 100%
rename from client/pkg/testutil/testutil.go
rename to client_sdk/pkg/testutil/testutil.go
diff --git a/client/pkg/testutil/var.go b/client_sdk/pkg/testutil/var.go
similarity index 100%
rename from client/pkg/testutil/var.go
rename to client_sdk/pkg/testutil/var.go
diff --git a/client_sdk/pkg/tlsutil/cipher_suites.go b/client_sdk/pkg/tlsutil/cipher_suites.go
new file mode 100644
index 00000000000..8d4ab67f367
--- /dev/null
+++ b/client_sdk/pkg/tlsutil/cipher_suites.go
@@ -0,0 +1,38 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import "crypto/tls"
+
+// GetCipherSuite returns the corresponding cipher suite ID and a boolean indicating whether it is supported.
+func GetCipherSuite(s string) (uint16, bool) {
+ for _, c := range tls.CipherSuites() {
+ if s == c.Name {
+ return c.ID, true
+ }
+ }
+ for _, c := range tls.InsecureCipherSuites() {
+ if s == c.Name {
+ return c.ID, true
+ }
+ }
+ switch s {
+ case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":
+ return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true
+ case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":
+ return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true
+ }
+ return 0, false
+}
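A minimal sketch of resolving cipher-suite names into the IDs expected by tls.Config; the suite names are only examples, and the import path follows the rename in this patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/tlsutil"
)

func main() {
	names := []string{
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", // legacy name handled by the switch fallback
	}
	ids := make([]uint16, 0, len(names))
	for _, n := range names {
		id, ok := tlsutil.GetCipherSuite(n)
		if !ok {
			log.Fatalf("unsupported cipher suite: %s", n)
		}
		ids = append(ids, id)
	}
	fmt.Printf("resolved cipher suite IDs: %v\n", ids)
}
```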
diff --git a/client/pkg/tlsutil/doc.go b/client_sdk/pkg/tlsutil/doc.go
similarity index 100%
rename from client/pkg/tlsutil/doc.go
rename to client_sdk/pkg/tlsutil/doc.go
diff --git a/client/pkg/tlsutil/tlsutil.go b/client_sdk/pkg/tlsutil/tlsutil.go
similarity index 84%
rename from client/pkg/tlsutil/tlsutil.go
rename to client_sdk/pkg/tlsutil/tlsutil.go
index 0f79865e805..0c581e59277 100644
--- a/client/pkg/tlsutil/tlsutil.go
+++ b/client_sdk/pkg/tlsutil/tlsutil.go
@@ -18,15 +18,15 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
- "os"
+ "io/ioutil"
)
-// NewCertPool creates x509 certPool with provided CA files.
+// NewCertPool creates an x509 cert pool with the provided CA files.
func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, CAFile := range CAFiles {
- pemByte, err := os.ReadFile(CAFile)
+ pemByte, err := ioutil.ReadFile(CAFile)
if err != nil {
return nil, err
}
@@ -49,14 +49,14 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
return certPool, nil
}
-// NewCert generates TLS cert by using the given cert,key and parse function.
+// NewCert generates a TLS certificate using the given cert file, key file, and parse function.
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
- cert, err := os.ReadFile(certfile)
+ cert, err := ioutil.ReadFile(certfile)
if err != nil {
return nil, err
}
- key, err := os.ReadFile(keyfile)
+ key, err := ioutil.ReadFile(keyfile)
if err != nil {
return nil, err
}
diff --git a/client/pkg/transport/doc.go b/client_sdk/pkg/transport/doc.go
similarity index 100%
rename from client/pkg/transport/doc.go
rename to client_sdk/pkg/transport/doc.go
diff --git a/client_sdk/pkg/transport/keepalive_listener.go b/client_sdk/pkg/transport/keepalive_listener.go
new file mode 100644
index 00000000000..ffb9537ce63
--- /dev/null
+++ b/client_sdk/pkg/transport/keepalive_listener.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/code_debug/conn"
+)
+
+type keepAliveConn interface {
+ SetKeepAlive(bool) error
+ SetKeepAlivePeriod(d time.Duration) error
+}
+
+// NewKeepAliveListener returns a listener that listens on the given address.
+// Be careful when wrap around KeepAliveListener with another Listener if TLSInfo is not nil.
+// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake.
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
+func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
+ if scheme == "https" {
+ if tlscfg == nil {
+ return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
+ }
+ return newTLSKeepaliveListener(l, tlscfg), nil
+ }
+
+ return &keepaliveListener{
+ Listener: l,
+ }, nil
+}
+
+type keepaliveListener struct{ net.Listener }
+
+func (kln *keepaliveListener) Accept() (net.Conn, error) {
+ c, err := kln.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ conn.PrintConn("keepaliveListener", c)
+ kac := c.(keepAliveConn)
+ // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
+ // default on linux: 30 + 8 * 30
+ // default on osx: 30 + 8 * 75
+ kac.SetKeepAlive(true)
+ kac.SetKeepAlivePeriod(30 * time.Second)
+ return c, nil
+}
+
+// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
+type tlsKeepaliveListener struct {
+ net.Listener
+ config *tls.Config
+}
+
+// Accept waits for and returns the next incoming TLS connection.
+// The returned connection c is a *tls.Conn.
+func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
+ c, err = l.Listener.Accept()
+ if err != nil {
+ return
+ }
+ conn.PrintConn("tlsKeepaliveListener", c)
+ kac := c.(keepAliveConn)
+ // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
+ // default on linux: 30 + 8 * 30
+ // default on osx: 30 + 8 * 75
+ kac.SetKeepAlive(true)
+ kac.SetKeepAlivePeriod(30 * time.Second)
+ c = tls.Server(c, l.config)
+ return c, nil
+}
+
+// newTLSKeepaliveListener creates a Listener which accepts connections from an inner
+// Listener and wraps each connection with Server.
+// The configuration config must be non-nil and must have
+// at least one certificate.
+func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
+ l := &tlsKeepaliveListener{}
+ l.Listener = inner
+ l.config = config
+ return l
+}
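A minimal sketch of wrapping a plain TCP listener with NewKeepAliveListener; with scheme "http" and a nil *tls.Config no TLS is involved, and the listen address is a placeholder:

```go
package main

import (
	"log"
	"net"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// Accepted connections get TCP keep-alive enabled with a 30s period.
	kaln, err := transport.NewKeepAliveListener(ln, "http", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer kaln.Close()
	log.Printf("listening with keep-alive on %s", kaln.Addr())
}
```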
diff --git a/client/pkg/transport/limit_listen.go b/client_sdk/pkg/transport/limit_listen.go
similarity index 87%
rename from client/pkg/transport/limit_listen.go
rename to client_sdk/pkg/transport/limit_listen.go
index 404722ba76e..fddc4a6406a 100644
--- a/client/pkg/transport/limit_listen.go
+++ b/client_sdk/pkg/transport/limit_listen.go
@@ -21,12 +21,12 @@ import (
"net"
"sync"
"time"
-)
-var (
- ErrNotTCP = errors.New("only tcp connections have keepalive")
+ "github.com/ls-2018/etcd_cn/code_debug/conn"
)
+var ErrNotTCP = errors.New("only tcp connections have keepalive")
+
// LimitListener returns a Listener that accepts at most n simultaneous
// connections from the provided Listener.
func LimitListener(l net.Listener, n int) net.Listener {
@@ -48,6 +48,7 @@ func (l *limitListener) Accept() (net.Conn, error) {
l.release()
return nil, err
}
+ conn.PrintConn("limitListener", c)
return &limitListenerConn{Conn: c, release: l.release}, nil
}
@@ -63,9 +64,6 @@ func (l *limitListenerConn) Close() error {
return err
}
-// SetKeepAlive sets keepalive
-//
-// Deprecated: use (*keepAliveConn) SetKeepAlive instead.
func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
@@ -74,9 +72,6 @@ func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
return tcpc.SetKeepAlive(doKeepAlive)
}
-// SetKeepAlivePeriod sets keepalive period
-//
-// Deprecated: use (*keepAliveConn) SetKeepAlivePeriod instead.
func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
tcpc, ok := l.Conn.(*net.TCPConn)
if !ok {
diff --git a/client_sdk/pkg/transport/listener.go b/client_sdk/pkg/transport/listener.go
new file mode 100644
index 00000000000..de88dbc602a
--- /dev/null
+++ b/client_sdk/pkg/transport/listener.go
@@ -0,0 +1,519 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/tlsutil"
+
+ "go.uber.org/zap"
+)
+
+// NewListener creates a new listener.
+func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
+ return newListener(addr, scheme, WithTLSInfo(tlsinfo))
+}
+
+// NewListenerWithOpts OK
+func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
+ return newListener(addr, scheme, opts...)
+}
+
+func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
+ if scheme == "unix" || scheme == "unixs" {
+ // unix sockets via unix://laddr
+ return NewUnixListener(addr)
+ }
+
+ lnOpts := newListenOpts(opts...)
+
+ switch {
+ case lnOpts.IsSocketOpts():
+ config, err := newListenConfig(lnOpts.socketOpts)
+ if err != nil {
+ return nil, err
+ }
+ lnOpts.ListenConfig = config
+ fallthrough
+ case lnOpts.IsTimeout(), lnOpts.IsSocketOpts():
+ ln, err := lnOpts.ListenConfig.Listen(context.TODO(), "tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ lnOpts.Listener = &rwTimeoutListener{
+ Listener: ln,
+ readTimeout: lnOpts.readTimeout,
+ writeTimeout: lnOpts.writeTimeout,
+ }
+ case lnOpts.IsTimeout():
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ lnOpts.Listener = &rwTimeoutListener{
+ Listener: ln,
+ readTimeout: lnOpts.readTimeout,
+ writeTimeout: lnOpts.writeTimeout,
+ }
+ default:
+ ln, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ lnOpts.Listener = ln
+ }
+
+ if lnOpts.skipTLSInfoCheck && !lnOpts.IsTLS() {
+ return lnOpts.Listener, nil
+ }
+ return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener)
+}
+
+func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
+ if scheme != "https" && scheme != "unixs" {
+ return l, nil
+ }
+ if tlsinfo != nil && tlsinfo.SkipClientSANVerify {
+ return NewTLSListener(l, tlsinfo)
+ }
+ return newTLSListener(l, tlsinfo, checkSAN)
+}
+
+func newListenConfig(sopts *SocketOpts) (net.ListenConfig, error) {
+ lc := net.ListenConfig{}
+ if sopts != nil {
+ ctls := getControls(sopts)
+ if len(ctls) > 0 {
+ lc.Control = ctls.Control
+ }
+ }
+ return lc, nil
+}
+
+type TLSInfo struct {
+	// CertFile is the _server_ cert; it is also used as the _client_ cert if ClientCertFile is empty.
+	CertFile string
+	// KeyFile is the key for the CertFile.
+	KeyFile string
+
+	// ClientCertFile is the _client_ cert used when client authentication is enabled; if empty, CertFile is used.
+	ClientCertFile string
+	// ClientKeyFile is the key for the ClientCertFile.
+	ClientKeyFile string
+
+	TrustedCAFile       string // CA certificate
+	ClientCertAuth      bool   // verify client certificates; false by default
+	CRLFile             string // path to the certificate revocation list file
+	InsecureSkipVerify  bool
+	SkipClientSANVerify bool
+
+	// ServerName ensures the cert matches the given host in case of discovery / virtual hosting.
+	ServerName string
+
+	// HandshakeFailure is optionally called when a connection fails to handshake; the connection is closed immediately afterwards.
+	HandshakeFailure func(*tls.Conn, error)
+
+	// CipherSuites is a list of supported cipher suites. If empty, Go auto-populates it. Note that cipher suites are prioritized in the given order.
+	CipherSuites []uint16
+
+	selfCert bool // self-signed
+
+	// parseFunc exists to simplify testing. Typically, parseFunc should be left nil; in that case, tls.X509KeyPair will be used.
+	parseFunc func([]byte, []byte) (tls.Certificate, error)
+
+	// AllowedCN is the Common Name that must be provided by a client in its certificate.
+	AllowedCN string
+
+	// AllowedHostname is an IP address or hostname that must match the TLS certificate provided by a client.
+	AllowedHostname string
+
+ Logger *zap.Logger
+
+ // EmptyCN indicates that the cert must have empty CN.
+ // If true, ClientConfig() will return an error for a cert with non empty CN.
+ EmptyCN bool
+}
+
+func (info TLSInfo) String() string {
+ return fmt.Sprintf("cert = %s, key = %s, client-cert=%s, client-key=%s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.ClientCertFile, info.ClientKeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
+}
+
+func (info TLSInfo) Empty() bool {
+ return info.CertFile == "" && info.KeyFile == ""
+}
+
+func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) {
+ info.Logger = lg
+ if selfSignedCertValidity == 0 {
+		err = fmt.Errorf("selfSignedCertValidity is invalid, it should be greater than 0")
+		info.Logger.Warn("cannot generate cert", zap.Error(err))
+ return
+ }
+ err = fileutil.TouchDirAll(dirpath)
+ if err != nil {
+ if info.Logger != nil {
+			info.Logger.Warn("cannot create cert directory", zap.Error(err))
+ }
+ return
+ }
+
+ certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem"))
+ if err != nil {
+ return
+ }
+ keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem"))
+ if err != nil {
+ return
+ }
+ _, errcert := os.Stat(certPath)
+ _, errkey := os.Stat(keyPath)
+ if errcert == nil && errkey == nil {
+ info.CertFile = certPath
+ info.KeyFile = keyPath
+ info.ClientCertFile = certPath
+ info.ClientKeyFile = keyPath
+ info.selfCert = true
+ return
+ }
+
+	// serial number
+ serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
+ if err != nil {
+ if info.Logger != nil {
+			info.Logger.Warn("cannot generate random number", zap.Error(err))
+ }
+ return
+ }
+
+ tmpl := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{Organization: []string{"etcd"}},
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)),
+		// key encipherment and digital signature
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		// server authentication
+ ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...),
+ BasicConstraintsValid: true,
+ IPAddresses: []net.IP{},
+ DNSNames: []string{},
+ }
+
+ if info.Logger != nil {
+		info.Logger.Warn("automatically generating self-signed certificate", zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter))
+ }
+
+ for _, host := range hosts {
+ h, _, _ := net.SplitHostPort(host)
+ if ip := net.ParseIP(h); ip != nil {
+ tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
+ } else {
+ tmpl.DNSNames = append(tmpl.DNSNames, h)
+ }
+ }
+
+ priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ if info.Logger != nil {
+			info.Logger.Warn("cannot generate ECDSA key", zap.Error(err))
+ }
+ return
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
+ if err != nil {
+ if info.Logger != nil {
+			info.Logger.Warn("cannot generate x509 certificate", zap.Error(err))
+ }
+ return
+ }
+
+ certOut, err := os.Create(certPath)
+ if err != nil {
+		info.Logger.Warn("cannot create cert file", zap.String("path", certPath), zap.Error(err))
+ return
+ }
+	// certificate file
+ pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+ certOut.Close()
+ if info.Logger != nil {
+		info.Logger.Info("created cert file", zap.String("path", certPath))
+ }
+
+ b, err := x509.MarshalECPrivateKey(priv)
+ if err != nil {
+ return
+ }
+ keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+ if err != nil {
+ if info.Logger != nil {
+			info.Logger.Warn("cannot create key file", zap.String("path", keyPath), zap.Error(err))
+ }
+ return
+ }
+	// private key
+ pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
+ keyOut.Close()
+ if info.Logger != nil {
+		info.Logger.Info("created key file", zap.String("path", keyPath))
+ }
+ return SelfCert(lg, dirpath, hosts, selfSignedCertValidity)
+}
+
+// OK
+func (info TLSInfo) baseConfig() (*tls.Config, error) {
+ if info.KeyFile == "" || info.CertFile == "" {
+		return nil, fmt.Errorf("KeyFile and CertFile must both be present [key: %v, cert: %v]", info.KeyFile, info.CertFile)
+ }
+ if info.Logger == nil {
+ info.Logger = zap.NewNop()
+ }
+
+	_, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) // parseFunc is nil in the main program
+ if err != nil {
+ return nil, err
+ }
+
+	// If the client cert and key are provided, pre-validate them so that we fail before accepting any connections.
+ if (info.ClientKeyFile == "") != (info.ClientCertFile == "") {
+		return nil, fmt.Errorf("ClientKeyFile and ClientCertFile must either both be present or both be absent [key: %v, cert: %v]", info.ClientKeyFile, info.ClientCertFile)
+ }
+ if info.ClientCertFile != "" {
+ _, err := tlsutil.NewCert(info.ClientCertFile, info.ClientKeyFile, info.parseFunc)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cfg := &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ ServerName: info.ServerName,
+ }
+
+ if len(info.CipherSuites) > 0 {
+ cfg.CipherSuites = info.CipherSuites
+ }
+
+	// Client certificates may be verified by an exact match on the CN, or by a more general check of the CN and SANs.
+ var verifyCertificate func(*x509.Certificate) bool
+ if info.AllowedCN != "" {
+ if info.AllowedHostname != "" {
+			return nil, fmt.Errorf("only one of AllowedCN and AllowedHostname may be specified (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
+ }
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ return info.AllowedCN == cert.Subject.CommonName
+ }
+ }
+ if info.AllowedHostname != "" {
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ return cert.VerifyHostname(info.AllowedHostname) == nil
+ }
+ }
+ if verifyCertificate != nil {
+ cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+ for _, chains := range verifiedChains {
+ if len(chains) != 0 {
+ if verifyCertificate(chains[0]) {
+ return nil
+ }
+ }
+ }
+			return errors.New("client certificate authentication failed")
+ }
+ }
+	// Certificates are issued by the same CA.
+	// Server-side certificate lookup.
+ cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
+ cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+ if os.IsNotExist(err) {
+ if info.Logger != nil {
+ info.Logger.Warn(
+					"failed to find peer cert files",
+ zap.String("cert-file", info.CertFile),
+ zap.String("key-file", info.KeyFile),
+ zap.Error(err),
+ )
+ }
+ } else if err != nil {
+ if info.Logger != nil {
+ info.Logger.Warn(
+					"failed to create peer certificate",
+ zap.String("cert-file", info.CertFile),
+ zap.String("key-file", info.KeyFile),
+ zap.Error(err),
+ )
+ }
+ }
+ return cert, err
+ }
+	// Client-side certificate lookup.
+ cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) {
+ certfile, keyfile := info.CertFile, info.KeyFile
+ if info.ClientCertFile != "" {
+ certfile, keyfile = info.ClientCertFile, info.ClientKeyFile
+ }
+ cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc)
+ if os.IsNotExist(err) {
+ if info.Logger != nil {
+ info.Logger.Warn(
+					"failed to find peer cert files",
+ zap.String("cert-file", certfile),
+ zap.String("key-file", keyfile),
+ zap.Error(err),
+ )
+ }
+ } else if err != nil {
+ if info.Logger != nil {
+ info.Logger.Warn(
+					"failed to create peer certificate",
+ zap.String("cert-file", certfile),
+ zap.String("key-file", keyfile),
+ zap.Error(err),
+ )
+ }
+ }
+ return cert, err
+ }
+ return cfg, nil
+}
+
+// OK
+func (info TLSInfo) cafiles() []string {
+ cs := make([]string, 0)
+ if info.TrustedCAFile != "" {
+ cs = append(cs, info.TrustedCAFile)
+ }
+ return cs
+}
+
+// ServerConfig generates a tls.Config object for use by an HTTP etcd.
+func (info TLSInfo) ServerConfig() (*tls.Config, error) {
+ cfg, err := info.baseConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ if info.Logger == nil {
+ info.Logger = zap.NewNop()
+ }
+
+ cfg.ClientAuth = tls.NoClientCert
+ if info.TrustedCAFile != "" || info.ClientCertAuth {
+ cfg.ClientAuth = tls.RequireAndVerifyClientCert
+ }
+
+ cs := info.cafiles()
+ if len(cs) > 0 {
+ info.Logger.Info("Loading cert pool", zap.Strings("cs", cs),
+ zap.Any("tlsinfo", info))
+ cp, err := tlsutil.NewCertPool(cs)
+ if err != nil {
+ return nil, err
+ }
+ cfg.ClientCAs = cp
+ }
+
+ // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP etcd
+ cfg.NextProtos = []string{"h2"}
+
+ // go1.13 enables TLS 1.3 by default
+ // and in TLS 1.3, cipher suites are not configurable
+ // setting Max TLS version to TLS 1.2 for go 1.13
+ cfg.MaxVersion = tls.VersionTLS12
+
+ return cfg, nil
+}
+
+// ClientConfig generates a tls.Config object for use by an HTTP client.
+func (info TLSInfo) ClientConfig() (*tls.Config, error) {
+ var cfg *tls.Config
+ var err error
+
+ if !info.Empty() {
+		cfg, err = info.baseConfig() // initialize the TLS config
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ cfg = &tls.Config{ServerName: info.ServerName}
+ }
+	cfg.InsecureSkipVerify = info.InsecureSkipVerify // whether the client verifies the server's certificate chain and host name
+
+ cs := info.cafiles()
+ if len(cs) > 0 {
+ cfg.RootCAs, err = tlsutil.NewCertPool(cs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if info.selfCert {
+ cfg.InsecureSkipVerify = true
+ }
+
+ if info.EmptyCN {
+ hasNonEmptyCN := false
+ cn := ""
+ _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) {
+ var block *pem.Block
+ block, _ = pem.Decode(certPEMBlock)
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return tls.Certificate{}, err
+ }
+ if len(cert.Subject.CommonName) != 0 {
+ hasNonEmptyCN = true
+ cn = cert.Subject.CommonName
+ }
+ return tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if hasNonEmptyCN {
+			return nil, fmt.Errorf("cert has a non-empty Common Name (%s): %s", cn, info.CertFile)
+ }
+ }
+
+ cfg.MaxVersion = tls.VersionTLS12
+
+ return cfg, nil
+}
+
+// IsClosedConnError returns true if the error is from closing listener, cmux.
+// copied from golang.org/x/net/http2/http2.go
+func IsClosedConnError(err error) bool {
+ // 'use of closed network connection' (Go <=1.8)
+ // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
+ // 'mux: listener closed' (cmux.ErrListenerClosed)
+ return err != nil && strings.Contains(err.Error(), "closed")
+}
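A minimal sketch of generating a self-signed TLSInfo with SelfCert and deriving server/client tls.Config objects from it; the output directory and host are placeholders:

```go
package main

import (
	"log"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
	"go.uber.org/zap"
)

func main() {
	// Writes cert.pem/key.pem under ./certs, valid for 1 year.
	info, err := transport.SelfCert(zap.NewExample(), "./certs", []string{"127.0.0.1:2379"}, 1)
	if err != nil {
		log.Fatal(err)
	}
	srvCfg, err := info.ServerConfig()
	if err != nil {
		log.Fatal(err)
	}
	cliCfg, err := info.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	// Self-signed certs force InsecureSkipVerify on the client side.
	log.Printf("server min TLS version: %x, client skips verify: %v", srvCfg.MinVersion, cliCfg.InsecureSkipVerify)
}
```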
diff --git a/client_sdk/pkg/transport/listener_opts.go b/client_sdk/pkg/transport/listener_opts.go
new file mode 100644
index 00000000000..82d829c0bc9
--- /dev/null
+++ b/client_sdk/pkg/transport/listener_opts.go
@@ -0,0 +1,76 @@
+package transport
+
+import (
+ "net"
+ "time"
+)
+
+type ListenerOptions struct {
+ Listener net.Listener
+ ListenConfig net.ListenConfig
+
+	socketOpts       *SocketOpts // socket options
+	tlsInfo          *TLSInfo    // TLS certificate info
+ skipTLSInfoCheck bool
+	writeTimeout time.Duration // read/write timeouts
+ readTimeout time.Duration
+}
+
+func newListenOpts(opts ...ListenerOption) *ListenerOptions {
+ lnOpts := &ListenerOptions{}
+ lnOpts.applyOpts(opts)
+ return lnOpts
+}
+
+func (lo *ListenerOptions) applyOpts(opts []ListenerOption) {
+ for _, opt := range opts {
+ opt(lo)
+ }
+}
+
+// IsTimeout returns true if the listener has a read/write timeout defined.
+func (lo *ListenerOptions) IsTimeout() bool { return lo.readTimeout != 0 || lo.writeTimeout != 0 }
+
+// IsSocketOpts returns true if the listener options includes socket options.
+func (lo *ListenerOptions) IsSocketOpts() bool {
+ if lo.socketOpts == nil {
+ return false
+ }
+ return lo.socketOpts.ReusePort || lo.socketOpts.ReuseAddress
+}
+
+// IsTLS returns true if the listener options include TLSInfo.
+func (lo *ListenerOptions) IsTLS() bool {
+ if lo.tlsInfo == nil {
+ return false
+ }
+ return !lo.tlsInfo.Empty()
+}
+
+// ListenerOption is an option that can be applied to a listener.
+type ListenerOption func(*ListenerOptions)
+
+// WithTimeout applies read and write timeouts to the listener.
+func WithTimeout(read, write time.Duration) ListenerOption {
+ return func(lo *ListenerOptions) {
+ lo.writeTimeout = write
+ lo.readTimeout = read
+ }
+}
+
+// WithSocketOpts defines the socket options that will be applied to the listener.
+func WithSocketOpts(s *SocketOpts) ListenerOption {
+ return func(lo *ListenerOptions) { lo.socketOpts = s }
+}
+
+// WithTLSInfo adds TLS credentials to the listener.
+func WithTLSInfo(t *TLSInfo) ListenerOption {
+ return func(lo *ListenerOptions) { lo.tlsInfo = t }
+}
+
+// WithSkipTLSInfoCheck when true a transport can be created with an https scheme
+// without passing TLSInfo, circumventing not presented error. Skipping this check
+// also requires that TLSInfo is not passed.
+func WithSkipTLSInfoCheck(skip bool) ListenerOption {
+ return func(lo *ListenerOptions) { lo.skipTLSInfoCheck = skip }
+}
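A minimal sketch of composing these options through NewListenerWithOpts; no TLSInfo is passed, so WithSkipTLSInfoCheck keeps the listener plain TCP, and the address is a placeholder:

```go
package main

import (
	"log"
	"time"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
)

func main() {
	ln, err := transport.NewListenerWithOpts("127.0.0.1:0", "http",
		transport.WithTimeout(5*time.Second, 5*time.Second),                 // read, write
		transport.WithSocketOpts(&transport.SocketOpts{ReuseAddress: true}), // SO_REUSEADDR
		transport.WithSkipTLSInfoCheck(true),                                // no TLSInfo provided
	)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	log.Printf("listening on %s", ln.Addr())
}
```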
diff --git a/client/pkg/transport/listener_tls.go b/client_sdk/pkg/transport/listener_tls.go
similarity index 94%
rename from client/pkg/transport/listener_tls.go
rename to client_sdk/pkg/transport/listener_tls.go
index 1a283739318..10d296e401c 100644
--- a/client/pkg/transport/listener_tls.go
+++ b/client_sdk/pkg/transport/listener_tls.go
@@ -19,10 +19,12 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
+ "io/ioutil"
"net"
- "os"
"strings"
"sync"
+
+ cm "github.com/ls-2018/etcd_cn/code_debug/conn"
)
// tlsListener overrides a TLS listener so it will reject client
@@ -39,6 +41,8 @@ type tlsListener struct {
type tlsCheckFunc func(context.Context, *tls.Conn) error
+var crlBytesMap sync.Map
+
// NewTLSListener handshakes TLS connections and performs optional CRL checking.
func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) {
check := func(context.Context, *tls.Conn) error { return nil }
@@ -128,7 +132,7 @@ func (l *tlsListener) acceptLoop() {
l.err = err
return
}
-
+ cm.PrintConn("tlsListener", conn)
pendingMu.Lock()
pending[conn] = struct{}{}
pendingMu.Unlock()
@@ -167,11 +171,18 @@ func (l *tlsListener) acceptLoop() {
}
func checkCRL(crlPath string, cert []*x509.Certificate) error {
- // TODO: cache
- crlBytes, err := os.ReadFile(crlPath)
- if err != nil {
- return err
+ var crlBytes []byte
+
+ if v, ok := crlBytesMap.Load(crlPath); ok {
+ crlBytes = v.([]byte)
+ } else {
+		// Read and cache the CRL on first use; assign to the outer crlBytes
+		// rather than shadowing it with :=.
+		b, err := ioutil.ReadFile(crlPath)
+		if err != nil {
+			return err
+		}
+		crlBytes = b
+		crlBytesMap.Store(crlPath, crlBytes)
}
+
certList, err := x509.ParseCRL(crlBytes)
if err != nil {
return err
@@ -222,8 +233,7 @@ func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string
func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
// reverse lookup
- var names []string
- var wildcards []string
+ wildcards, names := []string{}, []string{}
for _, dns := range dnsNames {
if strings.HasPrefix(dns, "*.") {
wildcards = append(wildcards, dns[1:])
diff --git a/client_sdk/pkg/transport/sockopt.go b/client_sdk/pkg/transport/sockopt.go
new file mode 100644
index 00000000000..278deb7c7a9
--- /dev/null
+++ b/client_sdk/pkg/transport/sockopt.go
@@ -0,0 +1,41 @@
+package transport
+
+import (
+ "syscall"
+)
+
+type Controls []func(network, addr string, conn syscall.RawConn) error
+
+func (ctls Controls) Control(network, addr string, conn syscall.RawConn) error {
+ for _, s := range ctls {
+ if err := s(network, addr, conn); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type SocketOpts struct {
+ // [1] https://man7.org/linux/man-pages/man7/socket.7.html
+	// ReusePort enables the SO_REUSEPORT socket option on the listener, allowing a port already in use to be rebound.
+	// Keep in mind that in this case locks on the data file may behave unexpectedly; users should take care to prevent lock contention.
+	ReusePort bool
+	// ReuseAddress enables the SO_REUSEADDR socket option, allowing binding to an address in `TIME_WAIT` state. This helps improve MTTR when etcd restarts slowly due to excessive `TIME_WAIT` sockets.
+ // [1] https://man7.org/linux/man-pages/man7/socket.7.html
+ ReuseAddress bool
+}
+
+func getControls(sopts *SocketOpts) Controls {
+ ctls := Controls{}
+ if sopts.ReuseAddress {
+ ctls = append(ctls, setReuseAddress)
+ }
+ if sopts.ReusePort {
+ ctls = append(ctls, setReusePort)
+ }
+ return ctls
+}
+
+func (sopts *SocketOpts) Empty() bool {
+ return !sopts.ReuseAddress && !sopts.ReusePort
+}
diff --git a/client_sdk/pkg/transport/sockopt_unix.go b/client_sdk/pkg/transport/sockopt_unix.go
new file mode 100644
index 00000000000..432b52e0fce
--- /dev/null
+++ b/client_sdk/pkg/transport/sockopt_unix.go
@@ -0,0 +1,22 @@
+//go:build !windows
+// +build !windows
+
+package transport
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func setReusePort(network, address string, conn syscall.RawConn) error {
+ return conn.Control(func(fd uintptr) {
+ syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+ })
+}
+
+func setReuseAddress(network, address string, conn syscall.RawConn) error {
+ return conn.Control(func(fd uintptr) {
+ syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+ })
+}
diff --git a/client_sdk/pkg/transport/sockopt_windows.go b/client_sdk/pkg/transport/sockopt_windows.go
new file mode 100644
index 00000000000..4e5af70b11e
--- /dev/null
+++ b/client_sdk/pkg/transport/sockopt_windows.go
@@ -0,0 +1,19 @@
+//go:build windows
+// +build windows
+
+package transport
+
+import (
+ "fmt"
+ "syscall"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+ return fmt.Errorf("port reuse is not supported on Windows")
+}
+
+// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as
+// there is no protection against port hijacking.
+func setReuseAddress(network, addr string, conn syscall.RawConn) error {
+ return fmt.Errorf("address reuse is not supported on Windows")
+}
diff --git a/client/pkg/transport/timeout_conn.go b/client_sdk/pkg/transport/timeout_conn.go
similarity index 100%
rename from client/pkg/transport/timeout_conn.go
rename to client_sdk/pkg/transport/timeout_conn.go
diff --git a/client/pkg/transport/timeout_dialer.go b/client_sdk/pkg/transport/timeout_dialer.go
similarity index 100%
rename from client/pkg/transport/timeout_dialer.go
rename to client_sdk/pkg/transport/timeout_dialer.go
diff --git a/client/pkg/transport/timeout_listener.go b/client_sdk/pkg/transport/timeout_listener.go
similarity index 94%
rename from client/pkg/transport/timeout_listener.go
rename to client_sdk/pkg/transport/timeout_listener.go
index 5d74bd70c23..1a142adf603 100644
--- a/client/pkg/transport/timeout_listener.go
+++ b/client_sdk/pkg/transport/timeout_listener.go
@@ -17,6 +17,8 @@ package transport
import (
"net"
"time"
+
+ "github.com/ls-2018/etcd_cn/code_debug/conn"
)
// NewTimeoutListener returns a listener that listens on the given address.
@@ -37,6 +39,7 @@ func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
if err != nil {
return nil, err
}
+ conn.PrintConn("rwTimeoutListener", c)
return timeoutConn{
Conn: c,
writeTimeout: rwln.writeTimeout,
diff --git a/client_sdk/pkg/transport/timeout_transport.go b/client_sdk/pkg/transport/timeout_transport.go
new file mode 100644
index 00000000000..85e28b6ec58
--- /dev/null
+++ b/client_sdk/pkg/transport/timeout_transport.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// NewTimeoutTransport returns a transport created using the given TLS info.
+// If a read/write on a connection it creates blocks longer than its time limit, it returns a timeout error.
+// If read/write timeouts are set, the transport will not reuse connections.
+func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
+ tr, err := NewTransport(info, dialtimeoutd)
+ if err != nil {
+ return nil, err
+ }
+
+ if rdtimeoutd != 0 || wtimeoutd != 0 {
+		// A timed-out connection expires shortly after going idle; it should not be put back into the http transport as an idle connection for future use.
+ tr.MaxIdleConnsPerHost = -1
+ } else {
+		// Allow more idle connections between peers to avoid unnecessary port allocation.
+ tr.MaxIdleConnsPerHost = 1024
+ }
+
+ tr.Dial = (&rwTimeoutDialer{
+ Dialer: net.Dialer{
+ Timeout: dialtimeoutd,
+ KeepAlive: 30 * time.Second,
+ },
+ rdtimeoutd: rdtimeoutd,
+ wtimeoutd: wtimeoutd,
+ }).Dial
+ return tr, nil
+}
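A minimal sketch of building an HTTP client on top of NewTimeoutTransport; an empty TLSInfo means plain HTTP, and the endpoint URL is only an example:

```go
package main

import (
	"io/ioutil"
	"log"
	"net/http"
	"time"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
)

func main() {
	tr, err := transport.NewTimeoutTransport(transport.TLSInfo{},
		2*time.Second, // dial timeout
		5*time.Second, // per-connection read timeout
		5*time.Second, // per-connection write timeout
	)
	if err != nil {
		log.Fatal(err)
	}
	cli := &http.Client{Transport: tr}
	resp, err := cli.Get("http://127.0.0.1:2379/version")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	log.Printf("%s", body)
}
```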
diff --git a/client/pkg/transport/tls.go b/client_sdk/pkg/transport/tls.go
similarity index 88%
rename from client/pkg/transport/tls.go
rename to client_sdk/pkg/transport/tls.go
index d5375863fd5..62fe0d38519 100644
--- a/client/pkg/transport/tls.go
+++ b/client_sdk/pkg/transport/tls.go
@@ -15,8 +15,6 @@
package transport
import (
- "context"
- "errors"
"fmt"
"strings"
"time"
@@ -29,8 +27,6 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
if err != nil {
return nil, err
}
- defer t.CloseIdleConnections()
-
var errs []string
var endpoints []string
for _, ep := range eps {
@@ -38,7 +34,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
errs = append(errs, fmt.Sprintf("%q is insecure", ep))
continue
}
- conn, cerr := t.DialContext(context.Background(), "tcp", ep[len("https://"):])
+ conn, cerr := t.Dial("tcp", ep[len("https://"):])
if cerr != nil {
errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
continue
@@ -47,7 +43,7 @@ func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
endpoints = append(endpoints, ep)
}
if len(errs) != 0 {
- err = errors.New(strings.Join(errs, ","))
+ err = fmt.Errorf("%s", strings.Join(errs, ","))
}
return endpoints, err
}
diff --git a/client_sdk/pkg/transport/transport.go b/client_sdk/pkg/transport/transport.go
new file mode 100644
index 00000000000..30c72bd8abe
--- /dev/null
+++ b/client_sdk/pkg/transport/transport.go
@@ -0,0 +1,76 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+)
+
+type unixTransport struct{ *http.Transport }
+
+// NewTransport creates a transport using the given TLS info and dial timeout.
+func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
+ cfg, err := info.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ t := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: dialtimeoutd,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: cfg,
+ }
+
+ dialer := &net.Dialer{
+ Timeout: dialtimeoutd,
+ KeepAlive: 30 * time.Second,
+ }
+
+ dialContext := func(ctx context.Context, net, addr string) (net.Conn, error) {
+ return dialer.DialContext(ctx, "unix", addr)
+ }
+ tu := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialContext,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: cfg,
+ // Cost of reopening connection on sockets is low, and they are mostly used in testing.
+ // Long living unix-transport connections were leading to 'leak' test flakes.
+	// Alternatively, the returned Transport (t) should override CloseIdleConnections to
+ // forward it to 'tu' as well.
+ IdleConnTimeout: time.Microsecond,
+ }
+ ut := &unixTransport{tu}
+
+ t.RegisterProtocol("unix", ut)
+ t.RegisterProtocol("unixs", ut)
+
+ return t, nil
+}
+
+func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ url := *req.URL
+ req.URL = &url
+ req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
+ return urt.Transport.RoundTrip(req)
+}
diff --git a/client/pkg/transport/unix_listener.go b/client_sdk/pkg/transport/unix_listener.go
similarity index 100%
rename from client/pkg/transport/unix_listener.go
rename to client_sdk/pkg/transport/unix_listener.go
diff --git a/client/pkg/types/doc.go b/client_sdk/pkg/types/doc.go
similarity index 100%
rename from client/pkg/types/doc.go
rename to client_sdk/pkg/types/doc.go
diff --git a/client_sdk/pkg/types/over_id.go b/client_sdk/pkg/types/over_id.go
new file mode 100644
index 00000000000..8799a1dbb16
--- /dev/null
+++ b/client_sdk/pkg/types/over_id.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import "strconv"
+
+// ID represents a generic identifier, stored as a uint64 but typically represented as a hex string on input/output.
+type ID uint64
+
+func (i ID) String() string {
+ return strconv.FormatUint(uint64(i), 16)
+}
+
+// IDFromString parses a hex-encoded ID string such as "8e9e05c52164694d".
+func IDFromString(s string) (ID, error) {
+ i, err := strconv.ParseUint(s, 16, 64)
+ return ID(i), err
+}
+
+type IDSlice []ID
+
+func (p IDSlice) Len() int { return len(p) }
+func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
+func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
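A minimal sketch of the ID round-trip between the uint64 form and the hex string used in etcd input/output, plus sorting an IDSlice; the main-package wrapper and import path are assumptions:

```go
package main

import (
	"fmt"
	"log"
	"sort"

	"github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
)

func main() {
	id, err := types.IDFromString("8e9e05c52164694d")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(uint64(id), id.String()) // prints the uint64 value and its hex form

	ids := types.IDSlice{3, 1, 2}
	sort.Sort(ids) // IDSlice implements sort.Interface
	fmt.Println(ids)
}
```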
diff --git a/client/pkg/types/set.go b/client_sdk/pkg/types/set.go
similarity index 97%
rename from client/pkg/types/set.go
rename to client_sdk/pkg/types/set.go
index 3e69c8d8b94..5b2f3796fb6 100644
--- a/client/pkg/types/set.go
+++ b/client_sdk/pkg/types/set.go
@@ -48,7 +48,7 @@ type unsafeSet struct {
d map[string]struct{}
}
-// Add adds a new value to the set (no-op if the value is already present)
+// Add adds a new value to the set (no-op if the value is already present).
func (us *unsafeSet) Add(value string) {
us.d[value] = struct{}{}
}
@@ -90,7 +90,7 @@ func (us *unsafeSet) Length() int {
// Values returns the values of the Set in an unspecified order.
func (us *unsafeSet) Values() (values []string) {
- values = make([]string, 0, len(us.d))
+ values = make([]string, 0)
for val := range us.d {
values = append(values, val)
}
diff --git a/client/pkg/types/slice.go b/client_sdk/pkg/types/slice.go
similarity index 100%
rename from client/pkg/types/slice.go
rename to client_sdk/pkg/types/slice.go
diff --git a/client/pkg/types/urls.go b/client_sdk/pkg/types/urls.go
similarity index 80%
rename from client/pkg/types/urls.go
rename to client_sdk/pkg/types/urls.go
index 49a38967e64..021195857aa 100644
--- a/client/pkg/types/urls.go
+++ b/client_sdk/pkg/types/urls.go
@@ -25,6 +25,7 @@ import (
type URLs []url.URL
+// NewURLs OK
func NewURLs(strs []string) (URLs, error) {
all := make([]url.URL, len(strs))
if len(all) == 0 {
@@ -36,25 +37,20 @@ func NewURLs(strs []string) (URLs, error) {
if err != nil {
return nil, err
}
-
- switch u.Scheme {
- case "http", "https":
- if _, _, err := net.SplitHostPort(u.Host); err != nil {
- return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
- }
-
- if u.Path != "" {
- return nil, fmt.Errorf("URL must not contain a path: %s", in)
- }
- case "unix", "unixs":
- break
- default:
- return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+ if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
+			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+ }
+ if _, _, err := net.SplitHostPort(u.Host); err != nil {
+ return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+ }
+ if u.Path != "" {
+ return nil, fmt.Errorf("URL must not contain a path: %s", in)
}
all[i] = *u
}
us := URLs(all)
us.Sort()
+
return us, nil
}
diff --git a/client/pkg/types/urlsmap.go b/client_sdk/pkg/types/urlsmap.go
similarity index 82%
rename from client/pkg/types/urlsmap.go
rename to client_sdk/pkg/types/urlsmap.go
index 47690cc381a..074d4e77dc5 100644
--- a/client/pkg/types/urlsmap.go
+++ b/client_sdk/pkg/types/urlsmap.go
@@ -20,12 +20,12 @@ import (
"strings"
)
-// URLsMap is a map from a name to its URLs.
+// URLsMap maps a node name to its peer URLs.
type URLsMap map[string]URLs
-// NewURLsMap returns a URLsMap instantiated from the given string,
-// which consists of discovery-formatted names-to-URLs, like:
+// NewURLsMap returns a URLsMap (node name to URLs) parsed from the given discovery-formatted string, like:
// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
+// It converts the string form into the map type.
func NewURLsMap(s string) (URLsMap, error) {
m := parse(s)
@@ -54,7 +54,7 @@ func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
return um, nil
}
-// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
+// String returns the discovery-formatted name-to-URLs string sorted by name, e.g. mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
func (c URLsMap) String() string {
var pairs []string
for name, urls := range c {
@@ -66,8 +66,7 @@ func (c URLsMap) String() string {
return strings.Join(pairs, ",")
}
-// URLs returns a list of all URLs.
-// The returned list is sorted in ascending lexicographical order.
+// URLs returns a list of all URLs.
func (c URLsMap) URLs() []string {
var urls []string
for _, us := range c {
@@ -79,12 +78,11 @@ func (c URLsMap) URLs() []string {
return urls
}
-// Len returns the size of URLsMap.
func (c URLsMap) Len() int {
return len(c)
}
-// parse parses the given string and returns a map listing the values specified for each key.
+// parse parses the given string and returns a map listing the values specified for each key.
func parse(s string) map[string][]string {
m := make(map[string][]string)
for s != "" {
diff --git a/client_sdk/v2/README.md b/client_sdk/v2/README.md
new file mode 100644
index 00000000000..284b7124902
--- /dev/null
+++ b/client_sdk/v2/README.md
@@ -0,0 +1,127 @@
+# etcd/client
+
+etcd/client is the Go client library for etcd.
+
+[![GoDoc](https://godoc.org/github.com/ls-2018/etcd_cn/client?status.png)](https://godoc.org/github.com/ls-2018/etcd_cn/client)
+
+For full compatibility, it is recommended to install released versions of clients using go modules.
+
+## Install
+
+```bash
+go get github.com/ls-2018/etcd_cn/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+ "log"
+ "time"
+ "context"
+
+ "github.com/ls-2018/etcd_cn/client"
+)
+
+func main() {
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: client.DefaultTransport,
+ // set timeout per request to fail fast when the target endpoint is unavailable
+ HeaderTimeoutPerRequest: time.Second,
+ }
+ c, err := client.New(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ kapi := client.NewKeysAPI(c)
+ // set "/foo" key with "bar" value
+ log.Print("Setting '/foo' key with 'bar' value")
+ resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Set is done. Metadata is %q\n", resp)
+ }
+ // get "/foo" key's value
+ log.Print("Getting '/foo' key value")
+ resp, err = kapi.Get(context.Background(), "/foo", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Get is done. Metadata is %q\n", resp)
+ // print value
+ log.Printf("%q key has %q value\n", resp.NodeExtern.Key, resp.NodeExtern.Value)
+ }
+}
+```
+
+## Error Handling
+
+etcd client might return three types of errors.
+
+- context error
+
+Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. If the
+context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal
+errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a
+request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error will be added into a
+list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an
+invalid JSON error.
+
+Here is the example code to handle client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+ log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+ if err == context.Canceled {
+ // ctx is canceled by another routine
+ } else if err == context.DeadlineExceeded {
+ // ctx is attached with a deadline and it exceeded
+ } else if cerr, ok := err.(*client.ClusterError); ok {
+ // process (cerr.Errors)
+ } else {
+ // bad cluster endpoints, which are not etcd servers
+ }
+}
+```
+
+## Caveat
+
+1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket
+ resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from
+ the data consumed by the client because data replicated to each etcd member has already passed through the consensus
+ process.
+
+2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning
+ properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first
+ attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all
+   available endpoints, it will return all the errors encountered.
+
+3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't
+ help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve
+ this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped,
+ but the connection is kept alive, hasn't been brought to our attention.
+
+4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is
+ isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read
+ requests or monitor the /health endpoint for member health information.
diff --git a/client_sdk/v2/auth_role.go b/client_sdk/v2/auth_role.go
new file mode 100644
index 00000000000..ef3aba6ce48
--- /dev/null
+++ b/client_sdk/v2/auth_role.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+)
+
+type Role struct {
+ Role string `json:"role"`
+ Permissions Permissions `json:"permissions"`
+ Grant *Permissions `json:"grant,omitempty"`
+ Revoke *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+ KV rwPermission `json:"kv"`
+}
+
+type rwPermission struct {
+ Read []string `json:"read"`
+ Write []string `json:"write"`
+}
+
+type PermissionType int
+
+const (
+ ReadPermission PermissionType = iota
+ WritePermission
+ ReadWritePermission
+)
+
+// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
+// interact with etcd's role creation and modification features.
+
+type AuthRoleAPI interface {
+ // AddRole adds a role.
+ AddRole(ctx context.Context, role string) error
+
+ // RemoveRole removes a role.
+ RemoveRole(ctx context.Context, role string) error
+
+ // GetRole retrieves role details.
+ GetRole(ctx context.Context, role string) (*Role, error)
+
+ // GrantRoleKV grants a role some permission prefixes for the KV store.
+ GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+ // RevokeRoleKV revokes some permission prefixes for a role on the KV store.
+ RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+ // ListRoles lists roles.
+ ListRoles(ctx context.Context) ([]string, error)
+}
diff --git a/client_sdk/v2/auth_user.go b/client_sdk/v2/auth_user.go
new file mode 100644
index 00000000000..f5d773c4eae
--- /dev/null
+++ b/client_sdk/v2/auth_user.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "net/url"
+ "path"
+)
+
+var defaultV2AuthPrefix = "/v2/auth"
+
+type User struct {
+ User string `json:"user"`
+ Password string `json:"password,omitempty"`
+ Roles []string `json:"roles"`
+ Grant []string `json:"grant,omitempty"`
+ Revoke []string `json:"revoke,omitempty"`
+}
+
+type UserRoles struct {
+ User string `json:"user"`
+ Roles []Role `json:"roles"`
+}
+
+func v2AuthURL(ep url.URL, action string, name string) *url.URL {
+ if name != "" {
+ ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
+ return &ep
+ }
+ ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
+ return &ep
+}
+
+type AuthAPI interface {
+ // Enable auth.
+ Enable(ctx context.Context) error
+
+ // Disable auth.
+ Disable(ctx context.Context) error
+}
+
+type authError struct {
+ Message string `json:"message"`
+ Code int `json:"-"`
+}
+
+func (e authError) Error() string {
+ return e.Message
+}
+
+// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
+// interact with etcd's user creation and modification features.
+
+type AuthUserAPI interface {
+ // AddUser adds a user.
+ AddUser(ctx context.Context, username string, password string) error
+
+ // RemoveUser removes a user.
+ RemoveUser(ctx context.Context, username string) error
+
+ // GetUser retrieves user details.
+ GetUser(ctx context.Context, username string) (*User, error)
+
+ // GrantUser grants a user some permission roles.
+ GrantUser(ctx context.Context, username string, roles []string) (*User, error)
+
+ // RevokeUser revokes some permission roles from a user.
+ RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
+
+ // ChangePassword changes the user's password.
+ ChangePassword(ctx context.Context, username string, password string) (*User, error)
+
+ // ListUsers lists the users.
+ ListUsers(ctx context.Context) ([]string, error)
+}
diff --git a/client/v2/cancelreq.go b/client_sdk/v2/cancelreq.go
similarity index 100%
rename from client/v2/cancelreq.go
rename to client_sdk/v2/cancelreq.go
diff --git a/client_sdk/v2/client.go b/client_sdk/v2/client.go
new file mode 100644
index 00000000000..4347bfb6b48
--- /dev/null
+++ b/client_sdk/v2/client.go
@@ -0,0 +1,717 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+)
+
+var (
+ ErrNoEndpoints = errors.New("client: no endpoints available")
+ ErrTooManyRedirects = errors.New("client: too many redirects")
+ ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
+ ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
+ errTooManyRedirectChecks = errors.New("client: too many redirect checks")
+
+ // oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
+ // that Do() will not retry a request
+ oneShotCtxValue interface{}
+)
+
+var DefaultRequestTimeout = 5 * time.Second
+
+var DefaultTransport CancelableTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ TLSHandshakeTimeout: 10 * time.Second,
+}
+
+type EndpointSelectionMode int
+
+const (
+ // EndpointSelectionRandom is the default value of the 'SelectionMode'.
+ // As the name implies, the client object will pick a node from the members
+ // of the cluster in a random fashion. If the cluster has three members, A, B,
+ // and C, the client picks any node from its three members as its request
+ // destination.
+ EndpointSelectionRandom EndpointSelectionMode = iota
+
+ // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+ // requests are sent directly to the cluster leader. This reduces
+ // forwarding roundtrips compared to making requests to etcd followers
+ // who then forward them to the cluster leader. In the event of a leader
+ // failure, however, clients configured this way cannot prioritize among
+ // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+ // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+ // maintain its knowledge of current cluster state.
+ //
+ // This mode should be used with Client.AutoSync().
+ EndpointSelectionPrioritizeLeader
+)
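+
+// A minimal usage sketch (endpoint URLs are illustrative, not part of this
+// package): prioritizing the leader and keeping the leader view fresh with
+// AutoSync, as the comment above requires.
+//
+// cfg := client.Config{
+// Endpoints: []string{"http://node1.example.com:2379", "http://node2.example.com:2379"},
+// SelectionMode: client.EndpointSelectionPrioritizeLeader,
+// }
+// c, err := client.New(cfg)
+// if err != nil {
+// // handle error
+// }
+// go c.AutoSync(context.Background(), 10*time.Second)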
+
+type Config struct {
+ // Endpoints defines a set of URLs (schemes, hosts and ports only)
+ // that can be used to communicate with a logical etcd cluster. For
+ // example, a three-node cluster could be provided like so:
+ //
+ // Endpoints: []string{
+ // "http://node1.example.com:2379",
+ // "http://node2.example.com:2379",
+ // "http://node3.example.com:2379",
+ // }
+ //
+ // If multiple endpoints are provided, the Client will attempt to
+ // use them all in the event that one or more of them are unusable.
+ //
+ // If Client.Sync is ever called, the Client may cache an alternate
+ // set of endpoints to continue operation.
+ Endpoints []string
+
+ // Transport is used by the Client to drive HTTP requests. If not
+ // provided, DefaultTransport will be used.
+ Transport CancelableTransport
+
+ // CheckRedirect specifies the policy for handling HTTP redirects.
+ // If CheckRedirect is not nil, the Client calls it before
+ // following an HTTP redirect. The sole argument is the number of
+ // requests that have already been made. If CheckRedirect returns
+ // an error, Client.Do will not make any further requests and return
+ // the error back to the caller.
+ //
+ // If CheckRedirect is nil, the Client uses its default policy,
+ // which is to stop after 10 consecutive requests.
+ CheckRedirect CheckRedirectFunc
+
+ // Username specifies the user credential to add as an authorization header
+ Username string
+
+ // Password is the password for the specified user to add as an authorization header
+ // to the request.
+ Password string
+
+ // HeaderTimeoutPerRequest specifies the time limit to wait for response
+ // header in a single request made by the Client. The timeout includes
+ // connection time, any redirects, and header wait time.
+ //
+ // For non-watch GET requests, etcd returns the response body immediately.
+ // For PUT/POST/DELETE requests, etcd will attempt to commit the request
+ // before responding, which is expected to take `100ms + 2 * RTT`.
+ // For watch request, etcd returns the header immediately to notify Client
+ // watch start. But if etcd is behind some kind of proxy, the response
+ // header may be cached at proxy, and Client cannot rely on this behavior.
+ //
+ // In particular, wait requests ignore this timeout.
+ //
+ // One API call may send multiple requests to different etcd servers until it
+ // succeeds. Use context of the API to specify the overall timeout.
+ //
+ // A HeaderTimeoutPerRequest of zero means no timeout.
+ HeaderTimeoutPerRequest time.Duration
+
+ // SelectionMode is an EndpointSelectionMode enum that specifies the
+ // policy for choosing the etcd cluster node to which requests are sent.
+ SelectionMode EndpointSelectionMode
+}
+
+func (cfg *Config) transport() CancelableTransport {
+ if cfg.Transport == nil {
+ return DefaultTransport
+ }
+ return cfg.Transport
+}
+
+func (cfg *Config) checkRedirect() CheckRedirectFunc {
+ if cfg.CheckRedirect == nil {
+ return DefaultCheckRedirect
+ }
+ return cfg.CheckRedirect
+}
+
+// CancelableTransport mimics net/http.Transport, but requires that
+// the object also support request cancellation.
+type CancelableTransport interface {
+ http.RoundTripper
+ CancelRequest(req *http.Request)
+}
+
+type CheckRedirectFunc func(via int) error
+
+// DefaultCheckRedirect follows up to 10 redirects, but no more.
+var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
+ if via > 10 {
+ return ErrTooManyRedirects
+ }
+ return nil
+}
+
+type Client interface {
+ // Sync updates the internal cache of the etcd cluster's membership.
+ Sync(context.Context) error
+
+ // AutoSync periodically calls Sync() every given interval.
+ // The recommended sync interval is 10 seconds to 1 minute, which does
+ // not add much overhead to etcd and lets the client catch up with
+ // cluster changes in time.
+ //
+ // The example to use it:
+ //
+ // for {
+ // err := client.AutoSync(ctx, 10*time.Second)
+ // if err == context.DeadlineExceeded || err == context.Canceled {
+ // break
+ // }
+ // log.Print(err)
+ // }
+ AutoSync(context.Context, time.Duration) error
+
+ // Endpoints returns a copy of the current set of API endpoints used
+ // by Client to resolve HTTP requests. If Sync has ever been called,
+ // this may differ from the initial Endpoints provided in the Config.
+ Endpoints() []string
+
+ // SetEndpoints sets the set of API endpoints used by Client to resolve
+ // HTTP requests. If the given endpoints are not valid, an error will be
+ // returned.
+ SetEndpoints(eps []string) error
+
+ // GetVersion retrieves the current etcd server and cluster version
+ GetVersion(ctx context.Context) (*version.Versions, error)
+
+ httpClient
+}
+
+func New(cfg Config) (Client, error) {
+ c := &httpClusterClient{
+ clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
+ rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+ selectionMode: cfg.SelectionMode,
+ }
+ if cfg.Username != "" {
+ c.credentials = &credentials{
+ username: cfg.Username,
+ password: cfg.Password,
+ }
+ }
+ if err := c.SetEndpoints(cfg.Endpoints); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+type httpClient interface {
+ Do(context.Context, httpAction) (*http.Response, []byte, error)
+}
+
+func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
+ return func(ep url.URL) httpClient {
+ return &redirectFollowingHTTPClient{
+ checkRedirect: cr,
+ client: &simpleHTTPClient{
+ transport: tr,
+ endpoint: ep,
+ headerTimeout: headerTimeout,
+ },
+ }
+ }
+}
+
+type credentials struct {
+ username string
+ password string
+}
+
+type httpClientFactory func(url.URL) httpClient
+
+type httpAction interface {
+ HTTPRequest(url.URL) *http.Request
+}
+
+type httpClusterClient struct {
+ clientFactory httpClientFactory
+ endpoints []url.URL
+ pinned int
+ credentials *credentials
+ sync.RWMutex
+ rand *rand.Rand
+ selectionMode EndpointSelectionMode
+}
+
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+ ceps := make([]url.URL, len(eps))
+ copy(ceps, eps)
+
+ // To perform a lookup on the new endpoint list without using the current
+ // client, we'll copy it
+ clientCopy := &httpClusterClient{
+ clientFactory: c.clientFactory,
+ credentials: c.credentials,
+ rand: c.rand,
+
+ pinned: 0,
+ endpoints: ceps,
+ }
+
+ mAPI := NewMembersAPI(clientCopy)
+ leader, err := mAPI.Leader(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(leader.ClientURLs) == 0 {
+ return "", ErrNoLeaderEndpoint
+ }
+
+ return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
+ if len(eps) == 0 {
+ return []url.URL{}, ErrNoEndpoints
+ }
+
+ neps := make([]url.URL, len(eps))
+ for i, ep := range eps {
+ u, err := url.Parse(ep)
+ if err != nil {
+ return []url.URL{}, err
+ }
+ neps[i] = *u
+ }
+ return neps, nil
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.endpoints = shuffleEndpoints(c.rand, neps)
+ // We're not doing anything for PrioritizeLeader here. This is
+ // because we don't have a context, so we can't call getLeaderEndpoint.
+ // However, if you're using PrioritizeLeader, you've already been told
+ // to regularly call Sync, where we do have a ctx and can figure out the
+ // leader. PrioritizeLeader is also quite a loose guarantee, so deal
+ // with it.
+ c.pinned = 0
+
+ return nil
+}
+
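+// Do issues the action against the pinned endpoint first and, on failure,
+// walks the remaining endpoints in order (the behavior described in the
+// Caveat section above); the pin moves to the first endpoint that answers
+// successfully.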
+func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ action := act
+ c.RLock()
+ leps := len(c.endpoints)
+ eps := make([]url.URL, leps)
+ n := copy(eps, c.endpoints)
+ pinned := c.pinned
+
+ if c.credentials != nil {
+ action = &authedAction{
+ act: act,
+ credentials: *c.credentials,
+ }
+ }
+ c.RUnlock()
+
+ if leps == 0 {
+ return nil, nil, ErrNoEndpoints
+ }
+
+ if leps != n {
+ return nil, nil, errors.New("unable to pick endpoint: copy failed")
+ }
+
+ var resp *http.Response
+ var body []byte
+ var err error
+ cerr := &ClusterError{}
+ isOneShot := ctx.Value(&oneShotCtxValue) != nil
+
+ for i := pinned; i < leps+pinned; i++ {
+ k := i % leps
+ hc := c.clientFactory(eps[k])
+ resp, body, err = hc.Do(ctx, action)
+ if err != nil {
+ cerr.Errors = append(cerr.Errors, err)
+ if err == ctx.Err() {
+ return nil, nil, ctx.Err()
+ }
+ if err == context.Canceled || err == context.DeadlineExceeded {
+ return nil, nil, err
+ }
+ } else if resp.StatusCode/100 == 5 {
+ switch resp.StatusCode {
+ case http.StatusInternalServerError, http.StatusServiceUnavailable:
+ // TODO: make sure this is a no leader response
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
+ default:
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns etcd error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
+ }
+ err = cerr.Errors[0]
+ }
+ if err != nil {
+ if !isOneShot {
+ continue
+ }
+ c.Lock()
+ c.pinned = (k + 1) % leps
+ c.Unlock()
+ return nil, nil, err
+ }
+ if k != pinned {
+ c.Lock()
+ c.pinned = k
+ c.Unlock()
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, cerr
+}
+
+func (c *httpClusterClient) Endpoints() []string {
+ c.RLock()
+ defer c.RUnlock()
+
+ eps := make([]string, len(c.endpoints))
+ for i, ep := range c.endpoints {
+ eps[i] = ep.String()
+ }
+
+ return eps
+}
+
+func (c *httpClusterClient) Sync(ctx context.Context) error {
+ mAPI := NewMembersAPI(c)
+ ms, err := mAPI.List(ctx)
+ if err != nil {
+ return err
+ }
+
+ var eps []string
+ for _, m := range ms {
+ eps = append(eps, m.ClientURLs...)
+ }
+
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ npin := 0
+
+ switch c.selectionMode {
+ case EndpointSelectionRandom:
+ c.RLock()
+ eq := endpointsEqual(c.endpoints, neps)
+ c.RUnlock()
+
+ if eq {
+ return nil
+ }
+ // When items in the endpoint list change, we choose a new pin
+ neps = shuffleEndpoints(c.rand, neps)
+ case EndpointSelectionPrioritizeLeader:
+ nle, err := c.getLeaderEndpoint(ctx, neps)
+ if err != nil {
+ return ErrNoLeaderEndpoint
+ }
+
+ for i, n := range neps {
+ if n.String() == nle {
+ npin = i
+ break
+ }
+ }
+ default:
+ return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+ }
+
+ c.Lock()
+ defer c.Unlock()
+ c.endpoints = neps
+ c.pinned = npin
+
+ return nil
+}
+
+func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ err := c.Sync(ctx)
+ if err != nil {
+ return err
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-ticker.C:
+ }
+ }
+}
+
+func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
+ act := &getAction{Prefix: "/version"}
+
+ resp, body, err := c.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ if len(body) == 0 {
+ return nil, ErrEmptyBody
+ }
+ var vresp version.Versions
+ if err := json.Unmarshal(body, &vresp); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return &vresp, nil
+ default:
+ var etcdErr Error
+ if err := json.Unmarshal(body, &etcdErr); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return nil, etcdErr
+ }
+}
+
+type roundTripResponse struct {
+ resp *http.Response
+ err error
+}
+
+type simpleHTTPClient struct {
+ transport CancelableTransport
+ endpoint url.URL
+ headerTimeout time.Duration
+}
+
+// ErrNoRequest indicates that the HTTPRequest object could not be found
+// or was nil. No processing could continue.
+var ErrNoRequest = errors.New("no HTTPRequest was available")
+
+func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ req := act.HTTPRequest(c.endpoint)
+ if req == nil {
+ return nil, nil, ErrNoRequest
+ }
+
+ if err := printcURL(req); err != nil {
+ return nil, nil, err
+ }
+
+ isWait := false
+ if req.URL != nil {
+ ws := req.URL.Query().Get("wait")
+ if len(ws) != 0 {
+ var err error
+ isWait, err = strconv.ParseBool(ws)
+ if err != nil {
+ return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
+ }
+ }
+ }
+
+ var hctx context.Context
+ var hcancel context.CancelFunc
+ if !isWait && c.headerTimeout > 0 {
+ hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
+ } else {
+ hctx, hcancel = context.WithCancel(ctx)
+ }
+ defer hcancel()
+
+ reqcancel := requestCanceler(c.transport, req)
+
+ rtchan := make(chan roundTripResponse, 1)
+ go func() {
+ resp, err := c.transport.RoundTrip(req)
+ rtchan <- roundTripResponse{resp: resp, err: err}
+ close(rtchan)
+ }()
+
+ var resp *http.Response
+ var err error
+
+ select {
+ case rtresp := <-rtchan:
+ resp, err = rtresp.resp, rtresp.err
+ case <-hctx.Done():
+ // cancel and wait for request to actually exit before continuing
+ reqcancel()
+ rtresp := <-rtchan
+ resp = rtresp.resp
+ switch {
+ case ctx.Err() != nil:
+ err = ctx.Err()
+ case hctx.Err() != nil:
+ err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
+ default:
+ panic("failed to get error from context")
+ }
+ }
+
+ // always check for resp nil-ness to deal with possible
+ // race conditions between channels above
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body []byte
+ done := make(chan struct{})
+ go func() {
+ body, err = ioutil.ReadAll(resp.Body)
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-ctx.Done():
+ resp.Body.Close()
+ <-done
+ return nil, nil, ctx.Err()
+ case <-done:
+ }
+
+ return resp, body, err
+}
+
+type authedAction struct {
+ act httpAction
+ credentials credentials
+}
+
+func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
+ r := a.act.HTTPRequest(url)
+ r.SetBasicAuth(a.credentials.username, a.credentials.password)
+ return r
+}
+
+type redirectFollowingHTTPClient struct {
+ client httpClient
+ checkRedirect CheckRedirectFunc
+}
+
+func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ next := act
+ for i := 0; i < 100; i++ {
+ if i > 0 {
+ if err := r.checkRedirect(i); err != nil {
+ return nil, nil, err
+ }
+ }
+ resp, body, err := r.client.Do(ctx, next)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp.StatusCode/100 == 3 {
+ hdr := resp.Header.Get("Location")
+ if hdr == "" {
+ return nil, nil, fmt.Errorf("location header not set")
+ }
+ loc, err := url.Parse(hdr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr)
+ }
+ next = &redirectedHTTPAction{
+ action: act,
+ location: *loc,
+ }
+ continue
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, errTooManyRedirectChecks
+}
+
+type redirectedHTTPAction struct {
+ action httpAction
+ location url.URL
+}
+
+func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
+ orig := r.action.HTTPRequest(ep)
+ orig.URL = &r.location
+ return orig
+}
+
+func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
+ // copied from rand.Rand.Perm in Go <= 1.9
+ n := len(eps)
+ p := make([]int, n)
+ for i := 0; i < n; i++ {
+ j := r.Intn(i + 1)
+ p[i] = p[j]
+ p[j] = i
+ }
+ neps := make([]url.URL, n)
+ for i, k := range p {
+ neps[i] = eps[k]
+ }
+ return neps
+}
+
+func endpointsEqual(left, right []url.URL) bool {
+ if len(left) != len(right) {
+ return false
+ }
+
+ sLeft := make([]string, len(left))
+ sRight := make([]string, len(right))
+ for i, l := range left {
+ sLeft[i] = l.String()
+ }
+ for i, r := range right {
+ sRight[i] = r.String()
+ }
+
+ sort.Strings(sLeft)
+ sort.Strings(sRight)
+ for i := range sLeft {
+ if sLeft[i] != sRight[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/client/v2/cluster_error.go b/client_sdk/v2/cluster_error.go
similarity index 100%
rename from client/v2/cluster_error.go
rename to client_sdk/v2/cluster_error.go
diff --git a/client_sdk/v2/curl.go b/client_sdk/v2/curl.go
new file mode 100644
index 00000000000..82a92ad25a0
--- /dev/null
+++ b/client_sdk/v2/curl.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+)
+
+var cURLDebug = false
+
+// printcURL prints the cURL equivalent request to stderr.
+// It returns an error if the body of the request cannot
+// be read.
+// The caller MUST cancel the request if there is an error.
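+// For example, a v2 keys PUT might be printed roughly as (illustrative):
+// cURL Command: curl -X PUT http://127.0.0.1:2379/v2/keys/foo -d "value=bar"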
+func printcURL(req *http.Request) error {
+ if !cURLDebug {
+ return nil
+ }
+ var (
+ command string
+ b []byte
+ err error
+ )
+
+ if req.URL != nil {
+ command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
+ }
+
+ if req.Body != nil {
+ b, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return err
+ }
+ command += fmt.Sprintf(" -d %q", string(b))
+ }
+
+ fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
+
+ // reset body
+ body := bytes.NewBuffer(b)
+ req.Body = ioutil.NopCloser(body)
+
+ return nil
+}
diff --git a/client_sdk/v2/doc.go b/client_sdk/v2/doc.go
new file mode 100644
index 00000000000..3c3101e57ad
--- /dev/null
+++ b/client_sdk/v2/doc.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package client provides bindings for the etcd APIs.
+
+Create a Config and exchange it for a Client:
+
+ import (
+ "net/http"
+ "context"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v2"
+ )
+
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: DefaultTransport,
+ }
+
+ c, err := client.New(cfg)
+ if err != nil {
+ // handle error
+ }
+
+Clients are safe for concurrent use by multiple goroutines.
+
+Create a KeysAPI using the Client, then use it to interact with etcd:
+
+ kAPI := client.NewKeysAPI(c)
+
+ // create a new key /foo with the value "bar"
+ _, err = kAPI.Create(context.Background(), "/foo", "bar")
+ if err != nil {
+ // handle error
+ }
+
+ // delete the newly created key only if the value is still "bar"
+ _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
+ if err != nil {
+ // handle error
+ }
+
+Use a custom context to set timeouts on your operations:
+
+ import "time"
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // set a new key, ignoring its previous state
+ _, err := kAPI.Set(ctx, "/ping", "pong", nil)
+ if err != nil {
+ if err == context.DeadlineExceeded {
+ // request took longer than 5s
+ } else {
+ // handle error
+ }
+ }
+
+*/
+package client
diff --git a/client_sdk/v2/json.go b/client_sdk/v2/json.go
new file mode 100644
index 00000000000..b3a65580bef
--- /dev/null
+++ b/client_sdk/v2/json.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "strconv"
+ "unsafe"
+
+ "github.com/json-iterator/go"
+ "github.com/modern-go/reflect2"
+)
+
+type customNumberExtension struct {
+ jsoniter.DummyExtension
+}
+
+func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
+ if typ.String() == "interface {}" {
+ return customNumberDecoder{}
+ }
+ return nil
+}
+
+type customNumberDecoder struct{}
+
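+// Decode fills an interface{} destination, preferring int64 for integral
+// JSON numbers (e.g. "3" becomes int64(3)) and falling back to float64
+// (e.g. "3.5" becomes float64(3.5)); non-number values take the default path.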
+func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ switch iter.WhatIsNext() {
+ case jsoniter.NumberValue:
+ var number jsoniter.Number
+ iter.ReadVal(&number)
+ i64, err := strconv.ParseInt(string(number), 10, 64)
+ if err == nil {
+ *(*interface{})(ptr) = i64
+ return
+ }
+ f64, err := strconv.ParseFloat(string(number), 64)
+ if err == nil {
+ *(*interface{})(ptr) = f64
+ return
+ }
+ iter.ReportError("DecodeNumber", err.Error())
+ default:
+ *(*interface{})(ptr) = iter.Read()
+ }
+}
+
+// caseSensitiveJsonIterator returns a jsoniter API that's configured to be
+// case-sensitive when unmarshalling, and otherwise compatible with
+// the encoding/json standard library.
+func caseSensitiveJsonIterator() jsoniter.API {
+ config := jsoniter.Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+ CaseSensitive: true,
+ }.Froze()
+ // Force jsoniter to decode number to interface{} via int64/float64, if possible.
+ config.RegisterExtension(&customNumberExtension{})
+ return config
+}
diff --git a/client/v2/keys.go b/client_sdk/v2/keys.go
similarity index 97%
rename from client/v2/keys.go
rename to client_sdk/v2/keys.go
index fa8fdc6b261..a4d14072fb5 100644
--- a/client/v2/keys.go
+++ b/client_sdk/v2/keys.go
@@ -25,9 +25,7 @@ import (
"strings"
"time"
- kjson "sigs.k8s.io/json"
-
- "go.etcd.io/etcd/client/pkg/v3/pathutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/pathutil"
)
const (
@@ -79,9 +77,7 @@ const (
PrevNoExist = PrevExistType("false")
)
-var (
- defaultV2KeysPrefix = "/v2/keys"
-)
+var defaultV2KeysPrefix = "/v2/keys"
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
// API over HTTP.
@@ -166,7 +162,7 @@ type SetOptions struct {
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Set operation to succeed.
+ // Node must be in order for the Set operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
@@ -201,7 +197,7 @@ type GetOptions struct {
// should be returned.
Recursive bool
- // Sort instructs the server whether or not to sort the Nodes.
+ // Sort instructs etcd whether or not to sort the Nodes.
// If true, the Nodes are sorted alphabetically by key in
// ascending order (A to z). If false (default), the Nodes will
// not be sorted and the ordering used should not be considered
@@ -224,7 +220,7 @@ type DeleteOptions struct {
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Delete operation to succeed.
+ // Node must be in order for the Delete operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
@@ -272,7 +268,7 @@ type Response struct {
// This index is not tied to the Node(s) contained in this Response.
Index uint64 `json:"-"`
- // ClusterID holds the cluster-level ID reported by the server. This
+ // ClusterID holds the cluster-level ID reported by etcd. This
// should be different for different etcd clusters.
ClusterID string `json:"-"`
}
@@ -299,7 +295,7 @@ type Node struct {
// ModifiedIndex is the etcd index at-which this Node was last modified.
ModifiedIndex uint64 `json:"modifiedIndex"`
- // Expiration is the server side expiration time of the key.
+ // Expiration is the etcd-side expiration time of the key.
Expiration *time.Time `json:"expiration,omitempty"`
// TTL is the time to live of the key in second.
@@ -472,7 +468,7 @@ func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
// v2KeysURL forms a URL representing the location of a key.
// The endpoint argument represents the base URL of an etcd
-// server. The prefix is the path needed to route from the
+// member. The prefix is the path needed to route from the
// provided endpoint's path to the root of the keys API
// (typically "/v2/keys").
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
@@ -655,9 +651,11 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp
return res, err
}
+var jsonIterator = caseSensitiveJsonIterator()
+
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
var res Response
- err := kjson.UnmarshalCaseSensitivePreserveInts(body, &res)
+ err := jsonIterator.Unmarshal(body, &res)
if err != nil {
return nil, ErrInvalidJSON
}
diff --git a/client/v2/members.go b/client_sdk/v2/members.go
similarity index 98%
rename from client/v2/members.go
rename to client_sdk/v2/members.go
index 5e868ec6991..d911c466673 100644
--- a/client/v2/members.go
+++ b/client_sdk/v2/members.go
@@ -23,7 +23,7 @@ import (
"net/url"
"path"
- "go.etcd.io/etcd/client/pkg/v3/types"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
)
var (
@@ -130,7 +130,7 @@ func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
return nil, err
}
- return mCollection, nil
+ return []Member(mCollection), nil
}
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
diff --git a/client_sdk/v3/auth.go b/client_sdk/v3/auth.go
new file mode 100644
index 00000000000..a544de4c08c
--- /dev/null
+++ b/client_sdk/v3/auth.go
@@ -0,0 +1,205 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/authpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "google.golang.org/grpc"
+)
+
+type (
+ AuthEnableResponse pb.AuthEnableResponse
+ AuthDisableResponse pb.AuthDisableResponse
+ AuthStatusResponse pb.AuthStatusResponse
+ AuthenticateResponse pb.AuthenticateResponse
+ AuthUserAddResponse pb.AuthUserAddResponse
+ AuthUserDeleteResponse pb.AuthUserDeleteResponse
+ AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
+ AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
+ AuthUserGetResponse pb.AuthUserGetResponse
+ AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
+ AuthRoleAddResponse pb.AuthRoleAddResponse
+ AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
+ AuthRoleGetResponse pb.AuthRoleGetResponse
+ AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
+ AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
+ AuthUserListResponse pb.AuthUserListResponse
+ AuthRoleListResponse pb.AuthRoleListResponse
+
+ PermissionType authpb.Permission_Type
+ Permission authpb.Permission
+)
+
+const (
+ PermRead = authpb.READ
+ PermWrite = authpb.WRITE
+ PermReadWrite = authpb.READWRITE
+)
+
+type UserAddOptions authpb.UserAddOptions
+
+type Auth interface {
+ Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error)
+ AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
+ AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
+ AuthStatus(ctx context.Context) (*AuthStatusResponse, error)
+ UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+ UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)
+ UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
+ UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
+ UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
+ UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
+ UserList(ctx context.Context) (*AuthUserListResponse, error)
+ UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
+ RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
+ RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
+ RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
+ RoleList(ctx context.Context) (*AuthRoleListResponse, error)
+ RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
+}
+
+type authClient struct {
+ remote pb.AuthClient
+ callOpts []grpc.CallOption
+}
+
+func NewAuth(c *Client) Auth {
+ api := &authClient{remote: RetryAuthClient(c)}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth {
+ api := &authClient{remote: remote}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
+ resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
+ return (*AuthenticateResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
+ resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
+ return (*AuthEnableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
+ resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
+ return (*AuthDisableResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) {
+ resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...)
+ return (*AuthStatusResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
+ resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
+ return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) {
+ resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...)
+ return (*AuthUserAddResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
+ resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
+ return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
+ resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
+ return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
+ resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
+ return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
+ resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
+ return (*AuthUserGetResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
+ resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
+ return (*AuthUserListResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
+ resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
+ return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
+}
+
+// RoleAdd ok
+func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
+ resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
+ return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
+}
+
+// RoleGrantPermission ok
+func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
+ perm := &authpb.Permission{
+ Key: key,
+ RangeEnd: rangeEnd,
+ PermType: authpb.Permission_Type(permType),
+ }
+ resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
+ return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
+}
+
+// RoleGet ok
+func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
+ resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
+ return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
+}
+
+// RoleList ok
+func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
+ resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
+ return (*AuthRoleListResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
+ resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: string(key), RangeEnd: string(rangeEnd)}, auth.callOpts...)
+ return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
+}
+
+func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
+ resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
+ return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
+}
+
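+// StrToPermissionType maps a case-insensitive permission name to its
+// PermissionType, e.g. "read" -> PermRead and "readwrite" -> PermReadWrite.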
+func StrToPermissionType(s string) (PermissionType, error) {
+ val, ok := authpb.PermissionTypeValue[strings.ToUpper(s)]
+ if ok {
+ return PermissionType(val), nil
+ }
+ return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
+}
diff --git a/client_sdk/v3/client.go b/client_sdk/v3/client.go
new file mode 100644
index 00000000000..463565c41ac
--- /dev/null
+++ b/client_sdk/v3/client.go
@@ -0,0 +1,573 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/credentials"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/internal/endpoint"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/internal/resolver"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ grpccredentials "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/status"
+)
+
+var (
+ ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
+ ErrOldCluster = errors.New("etcdclient: old cluster version")
+)
+
+// Client provides and manages an etcd v3 client session.
+type Client struct {
+ Cluster
+ KV
+ Lease
+ Watcher
+ Auth
+ Maintenance
+ conn *grpc.ClientConn
+ cfg Config // client configuration
+ creds grpccredentials.TransportCredentials // transport credentials
+ resolver *resolver.EtcdManualResolver
+ mu *sync.RWMutex
+ ctx context.Context // client context
+ cancel context.CancelFunc // cancel func for ctx
+ Username string
+ Password string
+ authTokenBundle credentials.Bundle
+ callOpts []grpc.CallOption
+ lgMu *sync.RWMutex
+ lg *zap.Logger
+}
+
+// New creates a client for communicating with the etcd server.
+func New(cfg Config) (*Client, error) {
+ if len(cfg.Endpoints) == 0 {
+ return nil, ErrNoAvailableEndpoints
+ }
+
+ return newClient(&cfg)
+}
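+
+// A minimal usage sketch (the endpoint and timeout values are illustrative):
+//
+// cli, err := clientv3.New(clientv3.Config{
+// Endpoints: []string{"127.0.0.1:2379"},
+// DialTimeout: 5 * time.Second,
+// })
+// if err != nil {
+// // handle error
+// }
+// defer cli.Close()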
+
+// NewCtxClient creates a client with a context but no underlying grpc
+// connection. This is useful for embedded cases that override the
+// service interface implementations and do not need connection management.
+func NewCtxClient(ctx context.Context, opts ...Option) *Client {
+ cctx, cancel := context.WithCancel(ctx)
+ c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex)}
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.lg == nil {
+ c.lg = zap.NewNop()
+ }
+ return c
+}
+
+// Option is a function type that can be passed as argument to NewCtxClient to configure client
+type Option func(*Client)
+
+// WithZapLogger is a NewCtxClient option that overrides the logger
+func WithZapLogger(lg *zap.Logger) Option {
+ return func(c *Client) {
+ c.lg = lg
+ }
+}
+
+// WithLogger overrides the logger.
+//
+// Deprecated: Please use WithZapLogger or Logger field in clientv3.Config
+//
+// Does not change grpcLogger, which can be explicitly configured
+// using grpc_zap.ReplaceGrpcLoggerV2(..) method.
+func (c *Client) WithLogger(lg *zap.Logger) *Client {
+ c.lgMu.Lock()
+ c.lg = lg
+ c.lgMu.Unlock()
+ return c
+}
+
+// GetLogger gets the logger.
+// NOTE: This method is for internal use of etcd-client library and should not be used as general-purpose logger.
+func (c *Client) GetLogger() *zap.Logger {
+ c.lgMu.RLock()
+ l := c.lg
+ c.lgMu.RUnlock()
+ return l
+}
+
+// Close shuts down the client's etcd connections.
+func (c *Client) Close() error {
+ c.cancel()
+ if c.Watcher != nil {
+ c.Watcher.Close()
+ }
+ if c.Lease != nil {
+ c.Lease.Close()
+ }
+ if c.conn != nil {
+ return toErr(c.ctx, c.conn.Close())
+ }
+ return c.ctx.Err()
+}
+
+func (c *Client) Ctx() context.Context { return c.ctx }
+
+// Dial connects to a single endpoint using the client's config.
+func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
+ creds := c.credentialsForEndpoint(ep)
+
+ // Using ad-hoc created resolver, to guarantee only explicitly given
+ // endpoint is used.
+ return c.dial(creds, grpc.WithResolvers(resolver.New(ep)))
+}
+
+// roundRobinQuorumBackoff retries against quorum between each backoff.
+// This is intended for use with a round robin load balancer.
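+// For example, with three endpoints the quorum is 2, so roughly every second
+// retry attempt backs off by a jittered waitBetween while the attempts in
+// between retry another endpoint immediately.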
+func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+ return func(attempt uint) time.Duration {
+ // after each round robin across quorum, backoff for our wait between duration
+ n := uint(len(c.Endpoints()))
+ quorum := (n/2 + 1)
+ if attempt%quorum == 0 {
+ c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
+ return jitterUp(waitBetween, jitterFraction)
+ }
+ c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
+ return 0
+ }
+}
+
+// --------------------------------------------- OVER ------------------------------------------------------------
+
+func (c *Client) SetEndpoints(eps ...string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.cfg.Endpoints = eps
+ c.resolver.SetEndpoints(eps)
+}
+
+// Sync synchronizes the client's endpoints with the known endpoints of the etcd cluster members.
+func (c *Client) Sync(ctx context.Context) error {
+ mresp, err := c.MemberList(ctx)
+ if err != nil {
+ return err
+ }
+ var eps []string
+ for _, m := range mresp.Members {
+ eps = append(eps, m.ClientURLs...)
+ }
+ c.SetEndpoints(eps...)
+ return nil
+}
+
+func (c *Client) autoSync() {
+ if c.cfg.AutoSyncInterval == time.Duration(0) {
+ return
+ }
+
+ for {
+ select {
+ case <-c.ctx.Done():
+ return
+ case <-time.After(c.cfg.AutoSyncInterval):
+ ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
+ err := c.Sync(ctx)
+ cancel()
+ if err != nil && err != c.ctx.Err() {
+ c.lg.Info("Auto sync endpoints failed.", zap.Error(err))
+ }
+ }
+ }
+}
+
+// dialSetupOpts assembles the gRPC dial (connection) options.
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
+ if c.cfg.DialKeepAliveTime > 0 {
+ params := keepalive.ClientParameters{
+ Time: c.cfg.DialKeepAliveTime,
+ Timeout: c.cfg.DialKeepAliveTimeout,
+ PermitWithoutStream: c.cfg.PermitWithoutStream,
+ }
+ opts = append(opts, grpc.WithKeepaliveParams(params))
+ }
+ opts = append(opts, dopts...)
+
+ if creds != nil {
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ } else {
+ opts = append(opts, grpc.WithInsecure())
+ }
+
+ // Interceptor retry and backoff.
+ // TODO: Replace all of clientv3/retry.go with RetryPolicy:
+ // https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130
+ rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
+ opts = append(opts,
+ // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
+ // Streams that are safe to retry are enabled individually.
+ grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)),
+ grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(defaultUnaryMaxRetries), rrBackoff)),
+ )
+
+ return opts, nil
+}
+
+// checkVersion verifies that the cluster is not running an etcd server version older than 3.2.
+func (c *Client) checkVersion() (err error) {
+ var wg sync.WaitGroup
+
+ eps := c.Endpoints()
+ errc := make(chan error, len(eps))
+ ctx, cancel := context.WithCancel(c.ctx)
+ if c.cfg.DialTimeout > 0 {
+ cancel()
+ ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+ }
+
+ wg.Add(len(eps))
+ for _, ep := range eps {
+ // if the cluster is current, any endpoint reports a recent version
+ go func(e string) {
+ defer wg.Done()
+ resp, rerr := c.Status(ctx, e)
+ if rerr != nil {
+ errc <- rerr
+ return
+ }
+ vs := strings.Split(resp.Version, ".") // [3 5 2]
+ maj, min := 0, 0
+ if len(vs) >= 2 {
+ var serr error
+ if maj, serr = strconv.Atoi(vs[0]); serr != nil {
+ errc <- serr
+ return
+ }
+ if min, serr = strconv.Atoi(vs[1]); serr != nil {
+ errc <- serr
+ return
+ }
+ }
+ // below version 3.2
+ if maj < 3 || (maj == 3 && min < 2) {
+ rerr = ErrOldCluster
+ }
+ errc <- rerr
+ }(ep)
+ }
+ for range eps {
+ if err = <-errc; err == nil {
+ break
+ }
+ }
+ cancel()
+ wg.Wait()
+ return err
+}
+
+func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
+
+// isHaltErr returns true if the given error and context indicate that no progress can be made, even after reconnecting.
+func isHaltErr(ctx context.Context, err error) bool {
+ if ctx != nil && ctx.Err() != nil {
+ return true
+ }
+ if err == nil {
+ return false
+ }
+ ev, _ := status.FromError(err)
+ return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
+}
+
+// isUnavailableErr reports whether the error is of the Unavailable kind.
+func isUnavailableErr(ctx context.Context, err error) bool {
+ if ctx != nil && ctx.Err() != nil {
+ return false
+ }
+ if err == nil {
+ return false
+ }
+ ev, ok := status.FromError(err)
+ if ok {
+ // Unavailable codes mean the system will be right back.
+ // (e.g., can't connect, lost leader)
+ return ev.Code() == codes.Unavailable
+ }
+ return false
+}
+
+func toErr(ctx context.Context, err error) error {
+ if err == nil {
+ return nil
+ }
+ err = rpctypes.Error(err)
+ if _, ok := err.(rpctypes.EtcdError); ok {
+ return err
+ }
+ if ev, ok := status.FromError(err); ok {
+ code := ev.Code()
+ switch code {
+ case codes.DeadlineExceeded:
+ fallthrough
+ case codes.Canceled:
+ if ctx.Err() != nil {
+ err = ctx.Err()
+ }
+ }
+ }
+ return err
+}
+
+func canceledByCaller(stopCtx context.Context, err error) bool {
+ if stopCtx.Err() == nil || err == nil {
+ return false
+ }
+
+ return err == context.Canceled || err == context.DeadlineExceeded
+}
+
+// IsConnCanceled returns true, if error is from a closed gRPC connection.
+// ref. https://github.com/grpc/grpc-go/pull/1854
+func IsConnCanceled(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // >= gRPC v1.23.x
+ s, ok := status.FromError(err)
+ if ok {
+ // connection is canceled or etcd has already closed the connection
+ return s.Code() == codes.Canceled || s.Message() == "transport is closing"
+ }
+
+ // >= gRPC v1.10.x
+ if err == context.Canceled {
+ return true
+ }
+
+ // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
+ return strings.Contains(err.Error(), "grpc: the client connection is closing")
+}
+
+func (c *Client) Endpoints() []string {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ eps := make([]string, len(c.cfg.Endpoints))
+ copy(eps, c.cfg.Endpoints)
+ return eps
+}
+
+// OK
+func (c *Client) getToken(ctx context.Context) error {
+ var err error
+
+ if c.Username == "" || c.Password == "" {
+ return nil
+ }
+
+ resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password)
+ if err != nil {
+ if err == rpctypes.ErrAuthNotEnabled {
+ return nil
+ }
+ return err
+ }
+ c.authTokenBundle.UpdateAuthToken(resp.Token)
+ return nil
+}
+
+// OK
+func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ creds := c.credentialsForEndpoint(c.Endpoints()[0]) // use the first endpoint to decide whether credentials are needed
+ opts := append(dopts, grpc.WithResolvers(c.resolver))
+ return c.dial(creds, opts...)
+}
+
+// OK
+func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ opts, err := c.dialSetupOpts(creds, dopts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to configure dialer: %v", err)
+ }
+ if c.Username != "" && c.Password != "" {
+ c.authTokenBundle = credentials.NewBundle(credentials.Config{})
+ opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
+ }
+
+ opts = append(opts, c.cfg.DialOptions...)
+
+ dctx := c.ctx
+ if c.cfg.DialTimeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+ defer cancel()
+ }
+ target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.Endpoints()[0]))
+ conn, err := grpc.DialContext(dctx, target, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// authority returns the authority (host) portion of an endpoint.
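+// Illustrative examples of the mapping (assumed, not exhaustive):
+// "http://127.0.0.1:2379" -> "127.0.0.1:2379"
+// "unix:/tmp/etcd.sock" -> "/tmp/etcd.sock"
+// "127.0.0.1:2379" -> "127.0.0.1:2379" (returned unchanged)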
+func authority(endpoint string) string {
+ spl := strings.SplitN(endpoint, "://", 2)
+ if len(spl) < 2 {
+ if strings.HasPrefix(endpoint, "unix:") {
+ return endpoint[len("unix:"):]
+ }
+ if strings.HasPrefix(endpoint, "unixs:") {
+ return endpoint[len("unixs:"):]
+ }
+ return endpoint
+ }
+ return spl[1]
+}
+
+// OK
+func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials {
+ r := endpoint.RequiresCredentials(ep) // 127.0.0.1:2379
+ switch r {
+ case endpoint.CREDS_DROP:
+ return nil
+ case endpoint.CREDS_OPTIONAL:
+ return c.creds
+ case endpoint.CREDS_REQUIRE:
+ if c.creds != nil {
+ return c.creds
+ }
+ return credentials.NewBundle(credentials.Config{}).TransportCredentials()
+ default:
+ panic(fmt.Errorf("unsupported CredsRequirement: %v", r))
+ }
+}
+
+// newClient creates a client for communicating with the etcd server.
+func newClient(cfg *Config) (*Client, error) {
+ if cfg == nil {
+ cfg = &Config{}
+ }
+ var creds grpccredentials.TransportCredentials
+ if cfg.TLS != nil {
+ creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
+ }
+
+ // use a temporary client to bootstrap the first connection
+ baseCtx := context.TODO()
+ if cfg.Context != nil {
+ baseCtx = cfg.Context
+ }
+
+ ctx, cancel := context.WithCancel(baseCtx)
+ client := &Client{
+ conn: nil,
+ cfg: *cfg,
+ creds: creds,
+ ctx: ctx,
+ cancel: cancel,
+ mu: new(sync.RWMutex),
+ callOpts: defaultCallOpts,
+ lgMu: new(sync.RWMutex),
+ }
+
+ var err error
+ if cfg.Logger != nil {
+ client.lg = cfg.Logger
+ } else if cfg.LogConfig != nil {
+ client.lg, err = cfg.LogConfig.Build()
+ } else {
+ client.lg, err = CreateDefaultZapLogger()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if cfg.Username != "" && cfg.Password != "" {
+ client.Username = cfg.Username
+ client.Password = cfg.Password
+ }
+ if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
+ if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
+ return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
+ }
+ callOpts := []grpc.CallOption{
+ defaultWaitForReady,
+ defaultMaxCallSendMsgSize,
+ defaultMaxCallRecvMsgSize,
+ }
+ if cfg.MaxCallSendMsgSize > 0 {
+ callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
+ }
+ if cfg.MaxCallRecvMsgSize > 0 {
+ callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
+ }
+ client.callOpts = callOpts
+ }
+ client.resolver = resolver.New(cfg.Endpoints...)
+
+ if len(cfg.Endpoints) < 1 {
+ client.cancel()
+ return nil, fmt.Errorf("at least one Endpoint is required in client config")
+ }
+
+ conn, err := client.dialWithBalancer()
+ if err != nil {
+ client.cancel()
+ client.resolver.Close()
+ return nil, err
+ }
+ client.conn = conn
+
+ client.Cluster = NewCluster(client)
+ client.KV = NewKV(client)
+ client.Lease = NewLease(client)
+ client.Watcher = NewWatcher(client)
+ client.Auth = NewAuth(client)
+ client.Maintenance = NewMaintenance(client)
+
+	// fetch an auth token over the established connection
+ ctx, cancel = client.ctx, func() {}
+ if client.cfg.DialTimeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout)
+ }
+ err = client.getToken(ctx)
+ if err != nil {
+ client.Close()
+ cancel()
+ return nil, err
+ }
+ cancel()
+ if cfg.RejectOldCluster { // false
+ if err := client.checkVersion(); err != nil {
+ client.Close()
+ return nil, err
+ }
+ }
+
+ go client.autoSync()
+ return client, nil
+}
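+// Usage sketch of constructing a client through this path. It assumes the package exports a
+// New(cfg Config) constructor that wraps newClient, as in upstream clientv3; the endpoint
+// address below is a placeholder.
+//
+//	cli, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"127.0.0.1:2379"},
+//		DialTimeout: 5 * time.Second,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer cli.Close()
+//	resp, err := cli.Get(context.TODO(), "foo")
+//	_ = resp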
diff --git a/client_sdk/v3/cluster.go b/client_sdk/v3/cluster.go
new file mode 100644
index 00000000000..4956a1b5216
--- /dev/null
+++ b/client_sdk/v3/cluster.go
@@ -0,0 +1,130 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "google.golang.org/grpc"
+)
+
+type (
+ Member pb.Member
+ MemberListResponse pb.MemberListResponse
+ MemberAddResponse pb.MemberAddResponse
+ MemberRemoveResponse pb.MemberRemoveResponse
+ MemberUpdateResponse pb.MemberUpdateResponse
+ MemberPromoteResponse pb.MemberPromoteResponse
+)
+
+type Cluster interface {
+ MemberList(ctx context.Context) (*MemberListResponse, error)
+ MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+ MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+ MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
+ MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
+ MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error)
+}
+
+type cluster struct {
+ remote pb.ClusterClient
+ callOpts []grpc.CallOption
+}
+
+func NewCluster(c *Client) Cluster {
+ api := &cluster{remote: RetryClusterClient(c)}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
+ api := &cluster{remote: remote}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return c.memberAdd(ctx, peerAddrs, false)
+}
+
+func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return c.memberAdd(ctx, peerAddrs, true)
+}
+
+func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) {
+ // fail-fast before panic in rafthttp
+ if _, err := types.NewURLs(peerAddrs); err != nil {
+ return nil, err
+ }
+
+ r := &pb.MemberAddRequest{
+ PeerURLs: peerAddrs,
+ IsLearner: isLearner,
+ }
+ resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*MemberAddResponse)(resp), nil
+}
+
+func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
+ r := &pb.MemberRemoveRequest{ID: id}
+ resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*MemberRemoveResponse)(resp), nil
+}
+
+func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
+ // fail-fast before panic in rafthttp
+ if _, err := types.NewURLs(peerAddrs); err != nil {
+ return nil, err
+ }
+
+ // it is safe to retry on update.
+ r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
+ resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
+ if err == nil {
+ return (*MemberUpdateResponse)(resp), nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
+ // it is safe to retry on list.
+ resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: true}, c.callOpts...)
+ if err == nil {
+ return (*MemberListResponse)(resp), nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
+ r := &pb.MemberPromoteRequest{ID: id}
+ resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*MemberPromoteResponse)(resp), nil
+}
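+// Usage sketch: the Cluster API is reachable through the Cluster field wired up in newClient
+// (client.Cluster = NewCluster(client)); cli and the peer URL below are placeholders.
+//
+//	members, err := cli.MemberList(context.TODO())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, m := range members.Members {
+//		fmt.Println(m.Name, m.PeerURLs)
+//	}
+//	// add a new member (or a learner via MemberAddAsLearner)
+//	_, err = cli.MemberAdd(context.TODO(), []string{"http://10.0.0.5:2380"})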
diff --git a/client/v3/compact_op.go b/client_sdk/v3/compact_op.go
similarity index 93%
rename from client/v3/compact_op.go
rename to client_sdk/v3/compact_op.go
index a6e660aa825..4bc3e323c09 100644
--- a/client/v3/compact_op.go
+++ b/client_sdk/v3/compact_op.go
@@ -15,7 +15,7 @@
package clientv3
import (
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)
// CompactOp represents a compact operation.
@@ -45,7 +45,7 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
}
// WithCompactPhysical makes Compact wait until all compacted entries are
-// removed from the etcd server's storage.
+// removed from the etcd server's storage.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
diff --git a/client_sdk/v3/compare.go b/client_sdk/v3/compare.go
new file mode 100644
index 00000000000..dc928e5213e
--- /dev/null
+++ b/client_sdk/v3/compare.go
@@ -0,0 +1,144 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type (
+ CompareTarget int
+ CompareResult int
+)
+
+const (
+ CompareVersion CompareTarget = iota
+ CompareCreated
+ CompareModified
+ CompareValue
+)
+
+type Cmp pb.Compare
+
+func Compare(cmp Cmp, result string, v interface{}) Cmp {
+ var r pb.Compare_CompareResult
+
+ switch result {
+ case "=":
+ r = pb.Compare_EQUAL
+ case "!=":
+ r = pb.Compare_NOT_EQUAL
+ case ">":
+ r = pb.Compare_GREATER
+ case "<":
+ r = pb.Compare_LESS
+ default:
+ panic("Unknown result op")
+ }
+
+ cmp.Result = r
+ switch cmp.Target {
+ case pb.Compare_VALUE:
+ val, ok := v.(string)
+ if !ok {
+ panic("bad compare value")
+ }
+ cmp.Compare_Value = &pb.Compare_Value{Value: val}
+ case pb.Compare_VERSION:
+ cmp.Compare_Version = &pb.Compare_Version{Version: mustInt64(v)}
+ case pb.Compare_CREATE:
+ cmp.Compare_CreateRevision = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
+ case pb.Compare_MOD:
+ cmp.Compare_ModRevision = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+ case pb.Compare_LEASE:
+ cmp.Compare_Lease = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
+ default:
+ panic("Unknown compare type")
+ }
+ return cmp
+}
+
+func Value(key string) Cmp {
+ return Cmp{Key: key, Target: pb.Compare_VALUE}
+}
+
+func Version(key string) Cmp {
+ return Cmp{Key: key, Target: pb.Compare_VERSION}
+}
+
+func CreateRevision(key string) Cmp {
+ return Cmp{Key: key, Target: pb.Compare_CREATE}
+}
+
+func ModRevision(key string) Cmp {
+ return Cmp{Key: key, Target: pb.Compare_MOD}
+}
+
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+ return Cmp{Key: key, Target: pb.Compare_LEASE}
+}
+
+// KeyBytes returns the byte slice holding the comparison key.
+func (cmp *Cmp) KeyBytes() []byte { return []byte(cmp.Key) }
+
+// WithKeyBytes sets the byte slice for the comparison key.
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = string(key) }
+
+// ValueBytes returns the byte slice holding the comparison value, if any.
+func (cmp *Cmp) ValueBytes() []byte {
+ if cmp.Compare_Value != nil {
+ return []byte(cmp.Compare_Value.Value)
+ }
+ return nil
+}
+
+// WithValueBytes sets the byte slice for the comparison's value.
+func (cmp *Cmp) WithValueBytes(v []byte) {
+ cmp.Compare_Value.Value = string(v)
+}
+
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+ cmp.RangeEnd = end
+ return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+ cmp.RangeEnd = getPrefix(cmp.Key)
+ return cmp
+}
+
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
+func mustInt64(val interface{}) int64 {
+ if v, ok := val.(int64); ok {
+ return v
+ }
+ if v, ok := val.(int); ok {
+ return int64(v)
+ }
+ panic("bad value")
+}
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val interface{}) int64 {
+ if v, ok := val.(LeaseID); ok {
+ return int64(v)
+ }
+ return mustInt64(val)
+}
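+// Usage sketch: Cmp values built by Value/Version/CreateRevision/ModRevision feed the If clause
+// of a transaction; the key names are placeholders and cli is an already-constructed *Client.
+//
+//	// put "bar" only if "foo" has never been created, otherwise read it back
+//	txnResp, err := cli.Txn(context.TODO()).
+//		If(clientv3.Compare(clientv3.CreateRevision("foo"), "=", 0)).
+//		Then(clientv3.OpPut("foo", "bar")).
+//		Else(clientv3.OpGet("foo")).
+//		Commit()
+//	if err == nil && !txnResp.Succeeded {
+//		fmt.Println("foo already exists")
+//	}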
diff --git a/client_sdk/v3/concurrency/distribted_mutex.go b/client_sdk/v3/concurrency/distribted_mutex.go
new file mode 100644
index 00000000000..1e325988e87
--- /dev/null
+++ b/client_sdk/v3/concurrency/distribted_mutex.go
@@ -0,0 +1,236 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+// ErrLocked is returned by TryLock when Mutex is already locked by another session.
+var ErrLocked = errors.New("mutex: Locked by another session")
+var ErrSessionExpired = errors.New("mutex: session is expired")
+
+// Mutex implements the sync Locker interface with etcd
+// It relies on the key-prefix (a.k.a. directory) mechanism: for a lock named `/mylock`, two
+// competing clients each write their own key, e.g. `key1="/mylock/UUID1"` and `key2="/mylock/UUID2"`,
+// where the UUID keeps the keys unique. Both writes succeed, but they return different Revisions.
+// To decide who holds the lock, query by the prefix `/mylock`: the response contains both key-value
+// pairs along with their Revisions, and the client whose key has the smallest Revision owns the lock.
+// A client that loses the race waits for the lock to be released (the owning key is deleted or its
+// lease expires) and then checks again whether it can take ownership.
+type Mutex struct {
+ s *Session
+
+	pfx   string // lock key prefix
+	myKey string // this session's key under pfx
+	myRev int64  // revision of this session's key
+ hdr *pb.ResponseHeader
+}
+
+// NewMutex creates a Mutex from a session and a lock key prefix.
+func NewMutex(s *Session, pfx string) *Mutex {
+ return &Mutex{s, pfx + "/", "", -1, nil}
+}
+
+// TryLock locks the mutex if not already locked by another session.
+// If lock is held by another session, return immediately after attempting necessary cleanup
+// The ctx argument is used for the sending/receiving Txn RPC.
+func (m *Mutex) TryLock(ctx context.Context) error {
+ resp, err := m.tryAcquire(ctx)
+ if err != nil {
+ return err
+ }
+	// if there is no key under the prefix, or the earliest-created key is ours, we already hold the lock
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+ client := m.s.Client()
+ // Cannot lock, so delete the key
+ if _, err := client.Delete(ctx, m.myKey); err != nil {
+ return err
+ }
+ m.myKey = "\x00"
+ m.myRev = -1
+ return ErrLocked
+}
+
+// Lock locks the mutex with a cancelable context. If the context is canceled
+// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
+func (m *Mutex) Lock(ctx context.Context) error {
+ resp, err := m.tryAcquire(ctx)
+ if err != nil {
+ return err
+ }
+	// If there is no key under the prefix, or the earliest-created key is ours, we already hold the lock.
+	// Whether we hold the lock is decided by comparing our own revision with the CreateRevision of the
+	// earliest-created key under the prefix, e.g.:
+	//   own revision 5, earliest CreateRevision 3 -> lock not acquired, fall through to waitDeletes
+	//   own revision 5, earliest CreateRevision 5 -> lock acquired
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+ client := m.s.Client()
+	// Wait for the earlier holders to release the lock and for their keys to be deleted.
+	// Each waiter uses the Watch mechanism to watch the keys under the same prefix whose revisions are
+	// smaller than its own; only once every smaller-revision key has released the lock can this client
+	// acquire it. waitDelete (used by waitDeletes below) implements that watch.
+	_, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) // watch the prefix for deletion of keys created before our revision
+ // release lock key if wait failed
+ if werr != nil {
+ m.Unlock(client.Ctx())
+ return werr
+ }
+
+ // make sure the session is not expired, and the owner key still exists.
+ gresp, werr := client.Get(ctx, m.myKey)
+ if werr != nil {
+ m.Unlock(client.Ctx())
+ return werr
+ }
+
+ if len(gresp.Kvs) == 0 { // is the session key lost?
+ return ErrSessionExpired
+ }
+ m.hdr = gresp.Header
+
+ return nil
+}
+
+func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
+ s := m.s
+ client := m.s.Client()
+	// s.Lease() is the session lease.
+	// Build the lock key from the prefix and the lease ID,
+	m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) // e.g. /my-lock/694d805a644b7a0d
+	// The core relies on the transaction and Lease features: when the key's CreateRevision is 0,
+	// the txn creates a key under the /my-lock prefix and also fetches the earliest-created key
+	// under that prefix (the smallest revision). The client that wrote that earliest key ends up
+	// holding the distributed lock; every other client enters wait mode.
+	//
+	// Transaction: compare the key's CreateRevision against 0 (0 means the key does not exist yet)
+	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
+	// if it does not exist yet, put the key with the session lease attached
+	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
+	// otherwise get the existing key and reuse the lock held by the lease; this second lookup is
+	// what makes the lock reentrant: as long as the lease is the same, acquiring again simply reads the key back
+	get := v3.OpGet(m.myKey)
+	// fetch the earliest-created key under the prefix (the current lock owner)
+	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+	// comparison logic: if CreateRevision == 0 the Then branch writes our key, otherwise the Else branch reads it back
+ resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
+ if err != nil {
+ return nil, err
+ }
+ //{
+ // "header":{
+ // "cluster_id":14841639068965178418,
+ // "member_id":10276657743932975437,
+ // "Revision":6,
+ // "raft_term":2
+ // },
+ // "succeeded":true,
+ // "responses":[
+ // {
+ // "ResponseOp_ResponsePut":{
+ // "response_put":{
+ // "header":{
+ // "Revision":6
+ // }
+ // }
+ // }
+ // },
+ // {
+ // "ResponseOp_ResponseRange":{
+ // "response_range":{
+ // "header":{
+ // "Revision":6
+ // },
+ // "kvs":[
+ // {
+ // "key":"/my-lock//694d805a644b7a0d",
+ // "create_revision":6,
+ // "mod_revision":6,
+ // "version":1,
+ // "lease":7587862072907233805
+ // }
+ // ],
+ // "count":1
+ // }
+ // }
+ // }
+ // ]
+ //}
+ //marshal, _ := json.Marshal(resp)
+ //fmt.Println(string(marshal))
+	// record our own revision (note: CreateRevision and Revision are not necessarily equal here)
+ m.myRev = resp.Header.Revision
+ if !resp.Succeeded {
+ m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
+ }
+ return resp, nil
+}
+
+func (m *Mutex) Unlock(ctx context.Context) error {
+ client := m.s.Client()
+ if _, err := client.Delete(ctx, m.myKey); err != nil {
+ return err
+ }
+ m.myKey = "\x00"
+ m.myRev = -1
+ return nil
+}
+
+func (m *Mutex) IsOwner() v3.Cmp {
+ return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
+}
+
+func (m *Mutex) Key() string { return m.myKey }
+
+// Header is the response header received from etcd on acquiring the lock.
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
+
+type lockerMutex struct{ *Mutex }
+
+func (lm *lockerMutex) Lock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Lock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+
+func (lm *lockerMutex) Unlock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+
+// NewLocker creates a sync.Locker backed by an etcd mutex.
+func NewLocker(s *Session, pfx string) sync.Locker {
+ return &lockerMutex{NewMutex(s, pfx)}
+}
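+// Usage sketch of the lock described above; cli is an already-constructed *clientv3.Client and
+// the prefix is a placeholder.
+//
+//	s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer s.Close()
+//	m := concurrency.NewMutex(s, "/my-lock")
+//	if err := m.Lock(context.TODO()); err != nil {
+//		log.Fatal(err)
+//	}
+//	// ... critical section ...
+//	if err := m.Unlock(context.TODO()); err != nil {
+//		log.Fatal(err)
+//	}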
diff --git a/client/v3/concurrency/doc.go b/client_sdk/v3/concurrency/doc.go
similarity index 100%
rename from client/v3/concurrency/doc.go
rename to client_sdk/v3/concurrency/doc.go
diff --git a/client_sdk/v3/concurrency/election.go b/client_sdk/v3/concurrency/election.go
new file mode 100644
index 00000000000..df7f21af7b5
--- /dev/null
+++ b/client_sdk/v3/concurrency/election.go
@@ -0,0 +1,239 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+var (
+ ErrElectionNotLeader = errors.New("election: not leader")
+ ErrElectionNoLeader = errors.New("election: no leader")
+)
+
+type Election struct {
+ session *Session
+ keyPrefix string
+ leaderKey string
+ leaderRev int64
+ leaderSession *Session
+ hdr *pb.ResponseHeader
+}
+
+// NewElection returns a new Election on the given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+ return &Election{session: s, keyPrefix: pfx + "/"}
+}
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+ return &Election{
+ keyPrefix: pfx,
+ session: s,
+ leaderKey: leaderKey,
+ leaderRev: leaderRev,
+ leaderSession: s,
+ }
+}
+
+// Campaign puts a value as eligible for the election on the prefix key.
+// Multiple sessions can participate in the election on the same prefix, but only one can be the leader at a time.
+// If the context is 'context.TODO()/context.Background()', Campaign keeps blocking while other keys are being
+// deleted, unless etcd returns a non-recoverable error (e.g. ErrCompacted).
+// Otherwise it blocks until it becomes the leader, or until the context is canceled or times out.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+ s := e.session
+ client := e.session.Client()
+
+ k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+ txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+ txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+ txn = txn.Else(v3.OpGet(k))
+ resp, err := txn.Commit()
+ if err != nil {
+ return err
+ }
+ e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+ if !resp.Succeeded {
+ kv := resp.Responses[0].GetResponseRange().Kvs[0]
+ e.leaderRev = kv.CreateRevision
+ if kv.Value != val {
+ if err = e.Proclaim(ctx, val); err != nil {
+ e.Resign(ctx)
+ return err
+ }
+ }
+ }
+
+ _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+ if err != nil {
+		// clean up in case the context was canceled
+ select {
+ case <-ctx.Done():
+ e.Resign(client.Ctx())
+ default:
+ e.leaderSession = nil
+ }
+ return err
+ }
+ e.hdr = resp.Header
+
+ return nil
+}
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+ if e.leaderSession == nil {
+ return ErrElectionNotLeader
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ txn := client.Txn(ctx).If(cmp)
+ txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+ tresp, terr := txn.Commit()
+ if terr != nil {
+ return terr
+ }
+ if !tresp.Succeeded {
+ e.leaderKey = ""
+ return ErrElectionNotLeader
+ }
+
+ e.hdr = tresp.Header
+ return nil
+}
+
+// Resign lets a leader start a new election.
+func (e *Election) Resign(ctx context.Context) (err error) {
+ if e.leaderSession == nil {
+ return nil
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+ if err == nil {
+ e.hdr = resp.Header
+ }
+ e.leaderKey = ""
+ e.leaderSession = nil
+ return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+ client := e.session.Client()
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return nil, err
+ } else if len(resp.Kvs) == 0 {
+ // no leader currently elected
+ return nil, ErrElectionNoLeader
+ }
+ return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals as GetResponse values.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+ retc := make(chan v3.GetResponse)
+ go e.observe(ctx, retc)
+ return retc
+}
+
+// observe watches for changes of leadership.
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+ client := e.session.Client()
+
+ defer close(ch)
+ for {
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return
+ }
+
+ var kv *mvccpb.KeyValue
+ var hdr *pb.ResponseHeader
+
+ if len(resp.Kvs) == 0 {
+ cctx, cancel := context.WithCancel(ctx)
+			// wait for the first key put on the prefix
+ opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+ wch := client.Watch(cctx, e.keyPrefix, opts...)
+ for kv == nil {
+ wr, ok := <-wch
+ if !ok || wr.Err() != nil {
+ cancel()
+ return
+ }
+ // only accept puts; a delete will make observe() spin
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.PUT {
+ hdr, kv = &wr.Header, ev.Kv
+ // may have multiple revs; hdr.rev = the last rev
+ // set to kv's rev in case batch has multiple Puts
+ hdr.Revision = kv.ModRevision
+ break
+ }
+ }
+ }
+ cancel()
+ } else {
+ hdr, kv = resp.Header, resp.Kvs[0]
+ }
+
+ select {
+ case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+ case <-ctx.Done():
+ return
+ }
+
+ cctx, cancel := context.WithCancel(ctx)
+ wch := client.Watch(cctx, kv.Key, v3.WithRev(hdr.Revision+1))
+ keyDeleted := false
+ for !keyDeleted {
+ wr, ok := <-wch
+ if !ok {
+ cancel()
+ return
+ }
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.DELETE {
+ keyDeleted = true
+ break
+ }
+ resp.Header = &wr.Header
+ resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+ select {
+ case ch <- *resp:
+ case <-cctx.Done():
+ cancel()
+ return
+ }
+ }
+ }
+ cancel()
+ }
+}
+
+func (e *Election) Key() string { return e.leaderKey }
+
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
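+// Usage sketch: campaigning for leadership on a prefix; s is a *Session as in the mutex example
+// and the prefix/value are placeholders.
+//
+//	e := concurrency.NewElection(s, "/my-election")
+//	if err := e.Campaign(context.TODO(), "node-1"); err != nil {
+//		log.Fatal(err)
+//	}
+//	// this client is now the leader; other clients can look the proposal up
+//	leader, err := e.Leader(context.TODO())
+//	if err == nil {
+//		fmt.Println(leader.Kvs[0].Value)
+//	}
+//	_ = e.Resign(context.TODO())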
diff --git a/client_sdk/v3/concurrency/key.go b/client_sdk/v3/concurrency/key.go
new file mode 100644
index 00000000000..7e93827af5d
--- /dev/null
+++ b/client_sdk/v3/concurrency/key.go
@@ -0,0 +1,70 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "fmt"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+// waitDelete watches key for delete events starting at rev; because the revision is pinned, the ABA problem is also avoided.
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var wr v3.WatchResponse
+ wch := client.Watch(cctx, key, v3.WithRev(rev))
+ for wr = range wch {
+ for _, ev := range wr.Events {
+			// return only once a delete event is observed
+ if ev.Type == mvccpb.DELETE {
+ return nil
+ }
+ }
+ }
+ if err := wr.Err(); err != nil {
+ return err
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes waits for the keys currently holding the lock to be deleted.
+// Internally it waits, in turn, on every key whose CreateRevision is smaller than maxCreateRev,
+// watching each for its delete event.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+	// WithLastCreate sorts by CreateRevision in descending order, e.g. 5 4 3 2 1
+	// WithMaxCreateRev restricts the result to keys created at or before maxCreateRev
+ getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+ for {
+ resp, err := client.Get(ctx, pfx, getOpts...)
+ if err != nil {
+ return nil, err
+ }
+ if len(resp.Kvs) == 0 {
+ return resp.Header, nil
+ }
+ lastKey := resp.Kvs[0].Key
+		// wait until all such keys under the prefix have been deleted
+ if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
+ return nil, err
+ }
+ }
+}
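+// Worked example: suppose three clients created keys under pfx with CreateRevisions 3, 5 and 7.
+// The revision-3 client owns the lock. The revision-5 client calls waitDeletes with maxCreateRev 4,
+// gets back only the revision-3 key and blocks in waitDelete until it is deleted. The revision-7
+// client (maxCreateRev 6) first waits on the revision-5 key, then loops and waits on any remaining
+// smaller key, so waiters form a chain and wake up one at a time.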
diff --git a/client/v3/concurrency/session.go b/client_sdk/v3/concurrency/session.go
similarity index 79%
rename from client/v3/concurrency/session.go
rename to client_sdk/v3/concurrency/session.go
index 8838b77e2d7..19d91aa5460 100644
--- a/client/v3/concurrency/session.go
+++ b/client_sdk/v3/concurrency/session.go
@@ -18,15 +18,15 @@ import (
"context"
"time"
- "go.uber.org/zap"
-
- v3 "go.etcd.io/etcd/client/v3"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
)
const defaultSessionTTL = 60
// Session represents a lease kept alive for the lifetime of a client.
// Fault-tolerant applications may use sessions to reason about liveness.
type Session struct {
client *v3.Client
opts *sessionOptions
@@ -37,11 +37,11 @@ type Session struct {
}
// NewSession gets the leased session for a client.
+// A Session object is abstracted out to keep the lease continuously alive so it does not expire.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
- lg := client.GetLogger()
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
for _, opt := range opts {
- opt(ops, lg)
+ opt(ops)
}
id := ops.leaseID
@@ -54,6 +54,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
}
ctx, cancel := context.WithCancel(ops.ctx)
+	// keeps the lease alive while the holder is active, effectively renewing the lock
keepAlive, err := client.KeepAlive(ctx, id)
if err != nil || keepAlive == nil {
cancel()
@@ -63,15 +64,19 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
donec := make(chan struct{})
s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
- // keep the lease alive until client error or cancelled context
+	// keep the lease alive until client error or cancelled context
go func() {
defer close(donec)
for range keepAlive {
- // eat messages until keep alive channel closes
+			// eat messages until the keep alive channel closes
}
}()
return s, nil
+	// 1. Multiple requests race to acquire the lock; the order of acquisition is decided by Revision.
+	// 2. If any key has a smaller Revision than the current key, some other key already holds the lock.
+	// 3. Wait until the earlier keys are deleted; at that point this key acquires the lock.
+	// A lock built on etcd also gets lease renewal for free; with Redis you would have to implement it yourself, so this is simpler to use.
}
// Client is the etcd client that is attached to the session.
@@ -111,16 +116,14 @@ type sessionOptions struct {
}
// SessionOption configures Session.
-type SessionOption func(*sessionOptions, *zap.Logger)
+type SessionOption func(*sessionOptions)
// WithTTL configures the session's TTL in seconds.
// If TTL is <= 0, the default 60 seconds TTL will be used.
func WithTTL(ttl int) SessionOption {
- return func(so *sessionOptions, lg *zap.Logger) {
+ return func(so *sessionOptions) {
if ttl > 0 {
so.ttl = ttl
- } else {
- lg.Warn("WithTTL(): TTL should be > 0, preserving current TTL", zap.Int64("current-session-ttl", int64(so.ttl)))
}
}
}
@@ -129,7 +132,7 @@ func WithTTL(ttl int) SessionOption {
// This is useful in process restart scenario, for example, to reclaim
// leadership from an election prior to restart.
func WithLease(leaseID v3.LeaseID) SessionOption {
- return func(so *sessionOptions, _ *zap.Logger) {
+ return func(so *sessionOptions) {
so.leaseID = leaseID
}
}
@@ -140,7 +143,7 @@ func WithLease(leaseID v3.LeaseID) SessionOption {
// context is canceled before Close() completes, the session's lease will be
// abandoned and left to expire instead of being revoked.
func WithContext(ctx context.Context) SessionOption {
- return func(so *sessionOptions, _ *zap.Logger) {
+ return func(so *sessionOptions) {
so.ctx = ctx
}
}
diff --git a/client/v3/concurrency/stm.go b/client_sdk/v3/concurrency/stm.go
similarity index 98%
rename from client/v3/concurrency/stm.go
rename to client_sdk/v3/concurrency/stm.go
index ba7303d0977..319856a4e6c 100644
--- a/client/v3/concurrency/stm.go
+++ b/client_sdk/v3/concurrency/stm.go
@@ -18,7 +18,7 @@ import (
"context"
"math"
- v3 "go.etcd.io/etcd/client/v3"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
)
// STM is an interface for software transactional memory.
@@ -33,7 +33,7 @@ type STM interface {
// Del deletes a key.
Del(key string)
- // commit attempts to apply the txn's changes to the server.
+	// commit attempts to apply the txn's changes to the etcd server.
commit() *v3.TxnResponse
reset()
}
diff --git a/client_sdk/v3/config.go b/client_sdk/v3/config.go
new file mode 100644
index 00000000000..e801d48e8b5
--- /dev/null
+++ b/client_sdk/v3/config.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "crypto/tls"
+ "time"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+)
+
+type Config struct {
+	Endpoints            []string      `json:"endpoints"`                // addresses of the etcd servers the client connects to
+	AutoSyncInterval     time.Duration `json:"auto-sync-interval"`       // interval for updating endpoints with the latest cluster members; 0 disables auto-sync (the default)
+	DialTimeout          time.Duration `json:"dial-timeout"`             // timeout for establishing a connection
+	DialKeepAliveTime    time.Duration `json:"dial-keep-alive-time"`     // interval at which the client pings the server to keep the connection alive
+	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`  // the connection is closed if no keepalive response is received within this time
+	MaxCallSendMsgSize   int           // defaults to 2MB
+	MaxCallRecvMsgSize   int
+	TLS                  *tls.Config // client TLS certificates
+	Username             string      `json:"username"`
+	Password             string      `json:"password"`
+	RejectOldCluster     bool        `json:"reject-old-cluster"` // whether to refuse connecting to clusters of outdated versions
+
+ // DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
+ // For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
+	// Without this, Dial returns immediately and connecting to the etcd server happens in the background.
+ DialOptions []grpc.DialOption
+
+ // Context is the default client context; it can be used to cancel grpc dial out and
+ // other operations that do not have an explicit context.
+ Context context.Context
+ Logger *zap.Logger
+ LogConfig *zap.Config
+
+ // PermitWithoutStream when set will allow client to send keepalive pings to etcd without any active streams(RPCs).
+ PermitWithoutStream bool `json:"permit-without-stream"`
+
+ // TODO: support custom balancer picker
+}
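+// Illustrative Config exercising the fields above; the addresses and credentials are placeholders.
+//
+//	cfg := clientv3.Config{
+//		Endpoints:            []string{"10.0.0.1:2379", "10.0.0.2:2379", "10.0.0.3:2379"},
+//		AutoSyncInterval:     5 * time.Minute,
+//		DialTimeout:          5 * time.Second,
+//		DialKeepAliveTime:    30 * time.Second,
+//		DialKeepAliveTimeout: 10 * time.Second,
+//		Username:             "root",
+//		Password:             "123456",
+//	}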
diff --git a/client/v3/credentials/credentials.go b/client_sdk/v3/credentials/credentials.go
similarity index 94%
rename from client/v3/credentials/credentials.go
rename to client_sdk/v3/credentials/credentials.go
index 024c16b6048..a724a08c83e 100644
--- a/client/v3/credentials/credentials.go
+++ b/client_sdk/v3/credentials/credentials.go
@@ -22,23 +22,21 @@ import (
"net"
"sync"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
grpccredentials "google.golang.org/grpc/credentials"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
-// Config defines gRPC credential configuration.
type Config struct {
TLSConfig *tls.Config
}
-// Bundle defines gRPC credential interface.
+// Bundle is the gRPC credentials interface.
type Bundle interface {
grpccredentials.Bundle
UpdateAuthToken(token string)
}
-// NewBundle constructs a new gRPC credential bundle.
+// NewBundle constructs a new gRPC credential bundle.
func NewBundle(cfg Config) Bundle {
return &bundle{
tc: newTransportCredential(cfg.TLSConfig),
diff --git a/client/v3/ctx.go b/client_sdk/v3/ctx.go
similarity index 94%
rename from client/v3/ctx.go
rename to client_sdk/v3/ctx.go
index 38cee6c27e4..6ea8e05fbfb 100644
--- a/client/v3/ctx.go
+++ b/client_sdk/v3/ctx.go
@@ -17,10 +17,9 @@ package clientv3
import (
"context"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
"google.golang.org/grpc/metadata"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/api/v3/version"
)
// WithRequireLeader requires client requests to only succeed
diff --git a/client/v3/experimental/recipes/barrier.go b/client_sdk/v3/experimental/recipes/barrier.go
similarity index 94%
rename from client/v3/experimental/recipes/barrier.go
rename to client_sdk/v3/experimental/recipes/barrier.go
index 7e950a3e385..4fdfc8bc439 100644
--- a/client/v3/experimental/recipes/barrier.go
+++ b/client_sdk/v3/experimental/recipes/barrier.go
@@ -17,8 +17,8 @@ package recipe
import (
"context"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
)
// Barrier creates a key in etcd to block processes, then deletes the key to
diff --git a/client_sdk/v3/experimental/recipes/client.go b/client_sdk/v3/experimental/recipes/client.go
new file mode 100644
index 00000000000..976a493f3f8
--- /dev/null
+++ b/client_sdk/v3/experimental/recipes/client.go
@@ -0,0 +1,55 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package recipe
+
+import (
+ "context"
+ "errors"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ spb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+)
+
+var (
+ ErrKeyExists = errors.New("key already exists")
+ ErrWaitMismatch = errors.New("unexpected wait result")
+ ErrTooManyClients = errors.New("too many clients")
+ ErrNoWatcher = errors.New("no watcher channel")
+)
+
+// deleteRevKey deletes a key by revision, returning false if key is missing
+func deleteRevKey(kv v3.KV, key string, rev int64) (bool, error) {
+ cmp := v3.Compare(v3.ModRevision(key), "=", rev)
+ req := v3.OpDelete(key)
+ txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit()
+ if err != nil {
+ return false, err
+ } else if !txnresp.Succeeded {
+ return false, nil
+ }
+ return true, nil
+}
+
+func claimFirstKey(kv v3.KV, kvs []*spb.KeyValue) (*spb.KeyValue, error) {
+ for _, k := range kvs {
+ ok, err := deleteRevKey(kv, string(k.Key), k.ModRevision)
+ if err != nil {
+ return nil, err
+ } else if ok {
+ return k, nil
+ }
+ }
+ return nil, nil
+}
diff --git a/client/v3/experimental/recipes/doc.go b/client_sdk/v3/experimental/recipes/doc.go
similarity index 100%
rename from client/v3/experimental/recipes/doc.go
rename to client_sdk/v3/experimental/recipes/doc.go
diff --git a/client_sdk/v3/experimental/recipes/double_barrier.go b/client_sdk/v3/experimental/recipes/double_barrier.go
new file mode 100644
index 00000000000..f087eec3da1
--- /dev/null
+++ b/client_sdk/v3/experimental/recipes/double_barrier.go
@@ -0,0 +1,139 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package recipe
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+)
+
+// DoubleBarrier blocks processes on Enter until an expected count enters, then
+// blocks again on Leave until all processes have left.
+type DoubleBarrier struct {
+ s *concurrency.Session
+ ctx context.Context
+
+ key string // key for the collective barrier
+ count int
+ myKey *EphemeralKV // current key for this process on the barrier
+}
+
+func NewDoubleBarrier(s *concurrency.Session, key string, count int) *DoubleBarrier {
+ return &DoubleBarrier{
+ s: s,
+ ctx: context.TODO(),
+ key: key,
+ count: count,
+ }
+}
+
+// Enter waits for "count" processes to enter the barrier then returns
+func (b *DoubleBarrier) Enter() error {
+ client := b.s.Client()
+ ek, err := newUniqueEphemeralKey(b.s, b.key+"/waiters")
+ if err != nil {
+ return err
+ }
+ b.myKey = ek
+
+ resp, err := client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix())
+ if err != nil {
+ return err
+ }
+
+ if len(resp.Kvs) > b.count {
+ return ErrTooManyClients
+ }
+
+ if len(resp.Kvs) == b.count {
+ // unblock waiters
+ _, err = client.Put(b.ctx, b.key+"/ready", "")
+ return err
+ }
+
+ _, err = WaitEvents(
+ client,
+ b.key+"/ready",
+ ek.Revision(),
+ []mvccpb.Event_EventType{mvccpb.PUT})
+ return err
+}
+
+// Leave waits for "count" processes to leave the barrier then returns
+func (b *DoubleBarrier) Leave() error {
+ client := b.s.Client()
+ resp, err := client.Get(b.ctx, b.key+"/waiters", clientv3.WithPrefix())
+ if err != nil {
+ return err
+ }
+ if len(resp.Kvs) == 0 {
+ return nil
+ }
+
+ lowest, highest := resp.Kvs[0], resp.Kvs[0]
+ for _, k := range resp.Kvs {
+ if k.ModRevision < lowest.ModRevision {
+ lowest = k
+ }
+ if k.ModRevision > highest.ModRevision {
+ highest = k
+ }
+ }
+ isLowest := string(lowest.Key) == b.myKey.Key()
+
+ if len(resp.Kvs) == 1 {
+ // this is the only node in the barrier; finish up
+ if _, err = client.Delete(b.ctx, b.key+"/ready"); err != nil {
+ return err
+ }
+ return b.myKey.Delete()
+ }
+
+ // this ensures that if a process fails, the ephemeral lease will be
+ // revoked, its barrier key is removed, and the barrier can resume
+
+ // lowest process in node => wait on highest process
+ if isLowest {
+ _, err = WaitEvents(
+ client,
+ string(highest.Key),
+ highest.ModRevision,
+ []mvccpb.Event_EventType{mvccpb.DELETE})
+ if err != nil {
+ return err
+ }
+ return b.Leave()
+ }
+
+ // delete self and wait on lowest process
+ if err = b.myKey.Delete(); err != nil {
+ return err
+ }
+
+ key := string(lowest.Key)
+ _, err = WaitEvents(
+ client,
+ key,
+ lowest.ModRevision,
+ []mvccpb.Event_EventType{mvccpb.DELETE})
+ if err != nil {
+ return err
+ }
+ return b.Leave()
+}
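+// Usage sketch: three workers rendezvous on the same barrier key; s is a *concurrency.Session
+// and the key/count are placeholders.
+//
+//	b := recipe.NewDoubleBarrier(s, "/barrier/job-42", 3)
+//	if err := b.Enter(); err != nil { // blocks until 3 processes have entered
+//		log.Fatal(err)
+//	}
+//	// ... coordinated work ...
+//	if err := b.Leave(); err != nil { // blocks until all 3 have left
+//		log.Fatal(err)
+//	}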
diff --git a/client/v3/experimental/recipes/grpc_gateway/user_add.sh b/client_sdk/v3/experimental/recipes/grpc_gateway/user_add.sh
similarity index 100%
rename from client/v3/experimental/recipes/grpc_gateway/user_add.sh
rename to client_sdk/v3/experimental/recipes/grpc_gateway/user_add.sh
diff --git a/client_sdk/v3/experimental/recipes/key.go b/client_sdk/v3/experimental/recipes/key.go
new file mode 100644
index 00000000000..6206b8c22fa
--- /dev/null
+++ b/client_sdk/v3/experimental/recipes/key.go
@@ -0,0 +1,166 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package recipe
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+)
+
+// RemoteKV is a key/revision pair created by the client and stored on etcd
+type RemoteKV struct {
+ kv v3.KV
+ key string
+ rev int64
+ val string
+}
+
+func newKey(kv v3.KV, key string, leaseID v3.LeaseID) (*RemoteKV, error) {
+ return newKV(kv, key, "", leaseID)
+}
+
+func newKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (*RemoteKV, error) {
+ rev, err := putNewKV(kv, key, val, leaseID)
+ if err != nil {
+ return nil, err
+ }
+ return &RemoteKV{kv, key, rev, val}, nil
+}
+
+func newUniqueKV(kv v3.KV, prefix string, val string) (*RemoteKV, error) {
+ for {
+		// create the candidate key
+
+ newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano())
+ rev, err := putNewKV(kv, newKey, val, v3.NoLease)
+ if err == nil {
+ return &RemoteKV{kv, newKey, rev, val}, nil
+ }
+		// only retry when the key already exists; return any other error
+ if err != ErrKeyExists {
+ return nil, err
+ }
+ }
+}
+
+// putNewKV creates the key only if it has not been created before;
+// otherwise it fails with ErrKeyExists.
+func putNewKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (int64, error) {
+ cmp := v3.Compare(v3.Version(key), "=", 0)
+ req := v3.OpPut(key, val, v3.WithLease(leaseID))
+ txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit()
+ if err != nil {
+ return 0, err
+ }
+ if !txnresp.Succeeded {
+ return 0, ErrKeyExists
+ }
+ return txnresp.Header.Revision, nil
+}
+
+// newSequentialKV allocates a new sequential key <prefix>/nnnnn with a given
+// prefix and value. Note: a bookkeeping node __<prefix> is also allocated.
+func newSequentialKV(kv v3.KV, prefix, val string) (*RemoteKV, error) {
+ resp, err := kv.Get(context.TODO(), prefix, v3.WithLastKey()...)
+ if err != nil {
+ return nil, err
+ }
+
+ // add 1 to last key, if any
+ newSeqNum := 0
+ if len(resp.Kvs) != 0 {
+ fields := strings.Split(string(resp.Kvs[0].Key), "/")
+ _, serr := fmt.Sscanf(fields[len(fields)-1], "%d", &newSeqNum)
+ if serr != nil {
+ return nil, serr
+ }
+ newSeqNum++
+ }
+ newKey := fmt.Sprintf("%s/%016d", prefix, newSeqNum)
+
+	// the base prefix key must be current (i.e., <=) with the etcd server update;
+ // the base key is important to avoid the following:
+ // N1: LastKey() == 1, start txn.
+ // N2: new Key 2, new Key 3, Delete Key 2
+ // N1: txn succeeds allocating key 2 when it shouldn't
+ baseKey := "__" + prefix
+
+ // current revision might contain modification so +1
+ cmp := v3.Compare(v3.ModRevision(baseKey), "<", resp.Header.Revision+1)
+ reqPrefix := v3.OpPut(baseKey, "")
+ reqnewKey := v3.OpPut(newKey, val)
+
+ txn := kv.Txn(context.TODO())
+ txnresp, err := txn.If(cmp).Then(reqPrefix, reqnewKey).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !txnresp.Succeeded {
+ return newSequentialKV(kv, prefix, val)
+ }
+ return &RemoteKV{kv, newKey, txnresp.Header.Revision, val}, nil
+}
+
+func (rk *RemoteKV) Key() string { return rk.key }
+func (rk *RemoteKV) Revision() int64 { return rk.rev }
+func (rk *RemoteKV) Value() string { return rk.val }
+
+func (rk *RemoteKV) Delete() error {
+ if rk.kv == nil {
+ return nil
+ }
+ _, err := rk.kv.Delete(context.TODO(), rk.key)
+ rk.kv = nil
+ return err
+}
+
+func (rk *RemoteKV) Put(val string) error {
+ _, err := rk.kv.Put(context.TODO(), rk.key, val)
+ return err
+}
+
+// EphemeralKV is a new key associated with a session lease
+type EphemeralKV struct{ RemoteKV }
+
+// newEphemeralKV creates a new key/value pair associated with a session lease
+func newEphemeralKV(s *concurrency.Session, key, val string) (*EphemeralKV, error) {
+ k, err := newKV(s.Client(), key, val, s.Lease())
+ if err != nil {
+ return nil, err
+ }
+ return &EphemeralKV{*k}, nil
+}
+
+// newUniqueEphemeralKey creates a new unique valueless key associated with a session lease
+func newUniqueEphemeralKey(s *concurrency.Session, prefix string) (*EphemeralKV, error) {
+ return newUniqueEphemeralKV(s, prefix, "")
+}
+
+// newUniqueEphemeralKV creates a new unique key/value pair associated with a session lease
+func newUniqueEphemeralKV(s *concurrency.Session, prefix, val string) (ek *EphemeralKV, err error) {
+ for {
+ newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano())
+ ek, err = newEphemeralKV(s, newKey, val)
+ if err == nil || err != ErrKeyExists {
+ break
+ }
+ }
+ return ek, err
+}
diff --git a/client/v3/experimental/recipes/priority_queue.go b/client_sdk/v3/experimental/recipes/priority_queue.go
similarity index 95%
rename from client/v3/experimental/recipes/priority_queue.go
rename to client_sdk/v3/experimental/recipes/priority_queue.go
index 1b26067466f..1837310c0be 100644
--- a/client/v3/experimental/recipes/priority_queue.go
+++ b/client_sdk/v3/experimental/recipes/priority_queue.go
@@ -18,8 +18,8 @@ import (
"context"
"fmt"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
)
// PriorityQueue implements a multi-reader, multi-writer distributed queue.
diff --git a/client_sdk/v3/experimental/recipes/queue.go b/client_sdk/v3/experimental/recipes/queue.go
new file mode 100644
index 00000000000..0786471bcc3
--- /dev/null
+++ b/client_sdk/v3/experimental/recipes/queue.go
@@ -0,0 +1,77 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package recipe
+
+import (
+ "context"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+)
+
+// Queue implements a multi-reader, multi-writer distributed queue.
+type Queue struct {
+ client *v3.Client
+ ctx context.Context
+
+ keyPrefix string
+}
+
+func NewQueue(client *v3.Client, keyPrefix string) *Queue {
+ return &Queue{client, context.TODO(), keyPrefix}
+}
+
+func (q *Queue) Enqueue(val string) error {
+ _, err := newUniqueKV(q.client, q.keyPrefix, val)
+ return err
+}
+
+// Dequeue processes a first-in, first-out queue.
+// If the queue is empty, Dequeue blocks until a value is enqueued.
+func (q *Queue) Dequeue() (string, error) {
+ // TODO: fewer round trips by fetching more than one key
+ resp, err := q.client.Get(q.ctx, q.keyPrefix, v3.WithFirstRev()...)
+ if err != nil {
+ return "", err
+ }
+
+ kv, err := claimFirstKey(q.client, resp.Kvs)
+ if err != nil {
+ return "", err
+ } else if kv != nil {
+ return string(kv.Value), nil
+ } else if resp.More {
+ // missed some items, retry to read in more
+ return q.Dequeue()
+ }
+
+ // nothing yet; wait on elements
+ ev, err := WaitPrefixEvents(
+ q.client,
+ q.keyPrefix,
+ resp.Header.Revision,
+ []mvccpb.Event_EventType{mvccpb.PUT})
+ if err != nil {
+ return "", err
+ }
+
+ ok, err := deleteRevKey(q.client, string(ev.Kv.Key), ev.Kv.ModRevision)
+ if err != nil {
+ return "", err
+ } else if !ok {
+ return q.Dequeue()
+ }
+ return string(ev.Kv.Value), err
+}
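+// Usage sketch: one producer and one consumer sharing the same key prefix; cli and the prefix
+// are placeholders.
+//
+//	q := recipe.NewQueue(cli, "/queue/jobs")
+//	if err := q.Enqueue("job-1"); err != nil {
+//		log.Fatal(err)
+//	}
+//	val, err := q.Dequeue() // blocks until an item is available
+//	if err == nil {
+//		fmt.Println(val)
+//	}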
diff --git a/client/v3/experimental/recipes/rwmutex.go b/client_sdk/v3/experimental/recipes/rwmutex.go
similarity index 93%
rename from client/v3/experimental/recipes/rwmutex.go
rename to client_sdk/v3/experimental/recipes/rwmutex.go
index 9f520baf48b..c848373cca8 100644
--- a/client/v3/experimental/recipes/rwmutex.go
+++ b/client_sdk/v3/experimental/recipes/rwmutex.go
@@ -17,9 +17,9 @@ package recipe
import (
"context"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
)
type RWMutex struct {
diff --git a/client_sdk/v3/experimental/recipes/watch.go b/client_sdk/v3/experimental/recipes/watch.go
new file mode 100644
index 00000000000..222d2a75498
--- /dev/null
+++ b/client_sdk/v3/experimental/recipes/watch.go
@@ -0,0 +1,58 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package recipe
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+)
+
+// WaitEvents waits on a key until it observes the given events and returns the final one.
+func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ wc := c.Watch(ctx, key, clientv3.WithRev(rev))
+ if wc == nil {
+ return nil, ErrNoWatcher
+ }
+ return waitEvents(wc, evs), nil
+}
+
+func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
+ if wc == nil {
+ return nil, ErrNoWatcher
+ }
+ return waitEvents(wc, evs), nil
+}
+
+func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event {
+ i := 0
+ for wresp := range wc {
+ for _, ev := range wresp.Events {
+ if ev.Type == evs[i] {
+ i++
+ if i == len(evs) {
+ return ev
+ }
+ }
+ }
+ }
+ return nil
+}
diff --git a/client_sdk/v3/internal/endpoint/over_endpoint.go b/client_sdk/v3/internal/endpoint/over_endpoint.go
new file mode 100644
index 00000000000..34bd7beae13
--- /dev/null
+++ b/client_sdk/v3/internal/endpoint/over_endpoint.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package endpoint
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "path"
+ "strings"
+)
+
+type CredsRequirement int
+
+const (
+	// CREDS_REQUIRE - Credentials/certificate required for this type of connection.
+ CREDS_REQUIRE CredsRequirement = iota
+ // CREDS_DROP - Credentials/certificate not needed and should get ignored.
+ CREDS_DROP
+ // CREDS_OPTIONAL - Credentials/certificate might be used if supplied
+ CREDS_OPTIONAL
+)
+
+func extractHostFromHostPort(ep string) string {
+ host, _, err := net.SplitHostPort(ep)
+ if err != nil {
+ return ep
+ }
+ return host
+}
+
+func extractHostFromPath(pathStr string) string {
+ return extractHostFromHostPort(path.Base(pathStr))
+}
+
+// mustSplit2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it panics instead.
+func mustSplit2(s, sep string) (string, string) {
+ spl := strings.SplitN(s, sep, 2)
+ if len(spl) < 2 {
+ panic(fmt.Errorf("token '%v' expected to have separator sep: `%v`", s, sep))
+ }
+ return spl[0], spl[1]
+}
+
+func schemeToCredsRequirement(schema string) CredsRequirement {
+ switch schema {
+ case "https", "unixs":
+ return CREDS_REQUIRE
+ case "http":
+ return CREDS_DROP
+ case "unix":
+ // Preserving previous behavior from:
+ // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212
+ // that likely was a bug due to missing 'fallthrough'.
+ // At the same time it seems legit to let the users decide whether they
+ // want credential control or not (and 'unixs' schema is not a standard thing).
+ return CREDS_OPTIONAL
+ case "":
+ return CREDS_OPTIONAL
+ default:
+ return CREDS_OPTIONAL
+ }
+}
+
+// This function translates endpoint names supported by the etcd server into
+// endpoints as supported by grpc with additional information
+// (server_name for cert validation, requireCreds - whether certs are needed).
+// The main differences:
+// - etcd supports unixs & https names as opposed to unix & http to
+// distinguish need to configure certificates.
+// - etcd supports http(s) names as opposed to tcp supported by grpc/dial method.
+// - etcd supports unix(s)://local-file naming schema
+// (as opposed to unix:local-file canonical name used by grpc for current dir files).
+// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
+// is considered serverName - to allow local testing of cert-protected communication.
+// See more:
+// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47
+// - https://golang.org/pkg/net/#Dial
+// - https://github.com/grpc/grpc/blob/master/doc/naming.md
+func translateEndpoint(ep string) (addr string, serverName string, requireCreds CredsRequirement) {
+ if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") {
+ if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") {
+ // absolute path case
+ schema, absolutePath := mustSplit2(ep, "://")
+ return "unix://" + absolutePath, extractHostFromPath(absolutePath), schemeToCredsRequirement(schema)
+ }
+ if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") {
+ // legacy etcd local path
+ schema, localPath := mustSplit2(ep, "://")
+ return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema)
+ }
+ schema, localPath := mustSplit2(ep, ":")
+ return "unix:" + localPath, extractHostFromPath(localPath), schemeToCredsRequirement(schema)
+ }
+
+ if strings.Contains(ep, "://") {
+ url, err := url.Parse(ep)
+ if err != nil {
+ return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL
+ }
+ if url.Scheme == "http" || url.Scheme == "https" {
+ return url.Host, url.Hostname(), schemeToCredsRequirement(url.Scheme)
+ }
+ return ep, url.Hostname(), schemeToCredsRequirement(url.Scheme)
+ }
+ // Handles plain addresses like 10.0.0.44:437.
+ return ep, extractHostFromHostPort(ep), CREDS_OPTIONAL
+}
+
+// RequiresCredentials reports whether credentials are required for an endpoint such as 127.0.0.1:2379.
+func RequiresCredentials(ep string) CredsRequirement {
+ _, _, requireCreds := translateEndpoint(ep)
+ return requireCreds
+}
+
+// Interpret endpoint parses an endpoint of the form
+// (http|https)://*|(unix|unixs)://)
+// and returns low-level address (supported by 'net') to connect to,
+// and a server name used for x509 certificate matching.
+func Interpret(ep string) (address string, serverName string) {
+ addr, serverName, _ := translateEndpoint(ep)
+ return addr, serverName
+}
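+// Examples of the translation performed by translateEndpoint (derived from the rules above):
+//
+//	"http://10.0.0.1:2379"    -> addr "10.0.0.1:2379",         serverName "10.0.0.1",   CREDS_DROP
+//	"https://etcd.local:2379" -> addr "etcd.local:2379",       serverName "etcd.local", CREDS_REQUIRE
+//	"10.0.0.44:437"           -> addr "10.0.0.44:437",         serverName "10.0.0.44",  CREDS_OPTIONAL
+//	"unix:///tmp/etcd.sock"   -> addr "unix:///tmp/etcd.sock", serverName "etcd.sock",  CREDS_OPTIONAL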
diff --git a/client_sdk/v3/internal/resolver/resolver.go b/client_sdk/v3/internal/resolver/resolver.go
new file mode 100644
index 00000000000..63d68ad5dcf
--- /dev/null
+++ b/client_sdk/v3/internal/resolver/resolver.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resolver
+
+import (
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/internal/endpoint"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/resolver/manual"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+const (
+ Schema = "etcd-endpoints"
+)
+
+type EtcdManualResolver struct {
+ *manual.Resolver
+ endpoints []string
+ serviceConfig *serviceconfig.ParseResult
+}
+
+func New(endpoints ...string) *EtcdManualResolver {
+ r := manual.NewBuilderWithScheme(Schema) // etcd-endpoints
+ return &EtcdManualResolver{Resolver: r, endpoints: endpoints, serviceConfig: nil}
+}
+
+func (r *EtcdManualResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ r.serviceConfig = cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`)
+ if r.serviceConfig.Err != nil {
+ return nil, r.serviceConfig.Err
+ }
+ res, err := r.Resolver.Build(target, cc, opts)
+ if err != nil {
+ return nil, err
+ }
+ // Populate the ClientConn (cc) with the endpoints stored in r.
+ r.updateState()
+ return res, nil
+}
+
+func (r *EtcdManualResolver) SetEndpoints(endpoints []string) {
+ r.endpoints = endpoints
+ r.updateState()
+}
+
+func (r EtcdManualResolver) updateState() {
+ if r.CC != nil {
+ addresses := make([]resolver.Address, len(r.endpoints))
+ for i, ep := range r.endpoints {
+ addr, serverName := endpoint.Interpret(ep)
+ addresses[i] = resolver.Address{Addr: addr, ServerName: serverName}
+ }
+ state := resolver.State{
+ Addresses: addresses,
+ ServiceConfig: r.serviceConfig,
+ }
+ r.UpdateState(state)
+ }
+}
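+
+// Usage sketch (illustrative, not part of this change; the target authority and
+// endpoint addresses are assumptions):
+//
+//	r := New("127.0.0.1:2379", "127.0.0.1:22379")
+//	conn, err := grpc.Dial(fmt.Sprintf("%s:///etcd-cluster", Schema),
+//		grpc.WithResolvers(r) /* plus transport credentials / other dial options */)
+//	// ...
+//	r.SetEndpoints([]string{"127.0.0.1:32379"}) // endpoints can be swapped at runtime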
diff --git a/client_sdk/v3/kv.go b/client_sdk/v3/kv.go
new file mode 100644
index 00000000000..0563fcb2651
--- /dev/null
+++ b/client_sdk/v3/kv.go
@@ -0,0 +1,156 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "google.golang.org/grpc"
+)
+
+type (
+ CompactResponse pb.CompactionResponse
+ PutResponse pb.PutResponse
+ GetResponse pb.RangeResponse
+ DeleteResponse pb.DeleteRangeResponse
+ TxnResponse pb.TxnResponse
+)
+
+type KV interface {
+ Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
+ Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
+ Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
+ Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
+ Do(ctx context.Context, op Op) (OpResponse, error)
+ Txn(ctx context.Context) Txn
+}
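+
+// Usage sketch (illustrative, from a caller's point of view with this package
+// imported as clientv3; the endpoint and keys are assumptions):
+//
+//	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
+//	if err != nil { /* handle error */ }
+//	defer cli.Close()
+//	if _, err := cli.Put(ctx, "foo", "bar"); err != nil { /* handle error */ }
+//	resp, err := cli.Get(ctx, "foo")
+//	for _, kv := range resp.Kvs {
+//		_ = kv.Value
+//	}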
+
+type OpResponse struct {
+ put *PutResponse
+ get *GetResponse
+ del *DeleteResponse
+ txn *TxnResponse
+}
+
+func (op OpResponse) Put() *PutResponse { return op.put }
+func (op OpResponse) Get() *GetResponse { return op.get }
+func (op OpResponse) Del() *DeleteResponse { return op.del }
+func (op OpResponse) Txn() *TxnResponse { return op.txn }
+
+func (resp *PutResponse) OpResponse() OpResponse {
+ return OpResponse{put: resp}
+}
+
+func (resp *GetResponse) OpResponse() OpResponse {
+ return OpResponse{get: resp}
+}
+
+func (resp *DeleteResponse) OpResponse() OpResponse {
+ return OpResponse{del: resp}
+}
+
+func (resp *TxnResponse) OpResponse() OpResponse {
+ return OpResponse{txn: resp}
+}
+
+type kv struct {
+ remote pb.KVClient
+ callOpts []grpc.CallOption
+}
+
+func NewKV(c *Client) KV {
+ api := &kv{remote: RetryKVClient(c)}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
+ api := &kv{remote: remote}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
+ r, err := kv.Do(ctx, OpPut(key, val, opts...))
+ return r.put, toErr(ctx, err)
+}
+
+// Get retrieves the value for the given key (this is the call behind 'etcdctl get').
+func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
+ r, err := kv.Do(ctx, OpGet(key, opts...))
+ return r.get, toErr(ctx, err)
+}
+
+func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
+ r, err := kv.Do(ctx, OpDelete(key, opts...))
+ return r.del, toErr(ctx, err)
+}
+
+func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
+ resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*CompactResponse)(resp), err
+}
+
+func (kv *kv) Txn(ctx context.Context) Txn {
+ return &txn{
+ kv: kv,
+ ctx: ctx,
+ callOpts: kv.callOpts,
+ }
+}
+
+func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
+ var err error
+ switch op.t {
+ case tRange:
+ var resp *pb.RangeResponse
+ resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
+ if err == nil {
+ return OpResponse{get: (*GetResponse)(resp)}, nil
+ }
+ case tPut:
+ var resp *pb.PutResponse
+ r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+ resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
+ if err == nil {
+ return OpResponse{put: (*PutResponse)(resp)}, nil
+ }
+ case tDeleteRange:
+ var resp *pb.DeleteRangeResponse
+ r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+ resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
+ if err == nil {
+ return OpResponse{del: (*DeleteResponse)(resp)}, nil
+ }
+ case tTxn:
+ var resp *pb.TxnResponse
+ resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
+ if err == nil {
+ return OpResponse{txn: (*TxnResponse)(resp)}, nil
+ }
+ default:
+ panic("未知的操作")
+ }
+ return OpResponse{}, toErr(ctx, err)
+}
diff --git a/client_sdk/v3/lease.go b/client_sdk/v3/lease.go
new file mode 100644
index 00000000000..b4ff8bf4f8d
--- /dev/null
+++ b/client_sdk/v3/lease.go
@@ -0,0 +1,570 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
+type (
+ LeaseRevokeResponse pb.LeaseRevokeResponse
+ LeaseID int64
+)
+
+type LeaseGrantResponse struct {
+ *pb.ResponseHeader
+ ID LeaseID
+ TTL int64
+ Error string
+}
+
+type LeaseKeepAliveResponse struct {
+ *pb.ResponseHeader
+ ID LeaseID
+ TTL int64
+}
+
+type LeaseTimeToLiveResponse struct {
+ *pb.ResponseHeader
+ ID LeaseID `json:"id"`
+
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
+ TTL int64 `json:"ttl"`
+
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ GrantedTTL int64 `json:"granted-ttl"`
+
+ // Keys is the list of keys attached to this lease.
+ Keys [][]byte `json:"keys"`
+}
+
+type LeaseStatus struct {
+ ID LeaseID `json:"id"`
+ // TODO: TTL int64
+}
+
+type LeaseLeasesResponse struct {
+ *pb.ResponseHeader
+ Leases []LeaseStatus `json:"leases"`
+}
+
+const (
+ // defaultTTL is the assumed lease TTL used for the first keepalive
+ // deadline before the actual TTL is known to the client.
+ defaultTTL = 5 * time.Second
+ // NoLease is a lease ID for the absence of a lease.
+ NoLease LeaseID = 0
+
+ // retryConnWait is how long to wait before retrying request due to an error
+ retryConnWait = 500 * time.Millisecond
+)
+
+// LeaseResponseChSize is the size of buffer to store unsent lease responses.
+// WARNING: DO NOT UPDATE.
+// Only for testing purposes.
+var LeaseResponseChSize = 16
+
+// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
+//
+// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
+type ErrKeepAliveHalted struct {
+ Reason error
+}
+
+func (e ErrKeepAliveHalted) Error() string {
+ s := "etcdclient: leases keep alive halted"
+ if e.Reason != nil {
+ s += ": " + e.Reason.Error()
+ }
+ return s
+}
+
+type Lease interface {
+ Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
+ Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
+ TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
+ Leases(ctx context.Context) (*LeaseLeasesResponse, error)
+ KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
+ KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
+ Close() error
+}
+
+type lessor struct {
+ mu sync.Mutex // guards all fields
+
+ // donec is closed and loopErr is set when recvKeepAliveLoop stops
+ donec chan struct{}
+ loopErr error
+
+ remote pb.LeaseClient
+
+ stream pb.Lease_LeaseKeepAliveClient
+ streamCancel context.CancelFunc
+
+ stopCtx context.Context
+ stopCancel context.CancelFunc
+
+ keepAlives map[LeaseID]*keepAlive
+
+ // firstKeepAliveTimeout is the timeout for the first keepalive request
+ // before the actual TTL is known to the lease client
+ firstKeepAliveTimeout time.Duration
+
+ // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
+ firstKeepAliveOnce sync.Once
+
+ callOpts []grpc.CallOption
+
+ lg *zap.Logger
+}
+
+type keepAlive struct {
+ chs []chan<- *LeaseKeepAliveResponse
+ ctxs []context.Context
+ // deadline is the time the keep alive channels close if no response
+ deadline time.Time
+ // nextKeepAlive is when to send the next keep alive message
+ nextKeepAlive time.Time
+ // donec is closed on lease revoke, expiration, or cancel.
+ donec chan struct{}
+}
+
+func NewLease(c *Client) Lease {
+ return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
+}
+
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
+ l := &lessor{
+ donec: make(chan struct{}),
+ keepAlives: make(map[LeaseID]*keepAlive),
+ remote: remote,
+ firstKeepAliveTimeout: keepAliveTimeout,
+ lg: c.lg,
+ }
+ if l.firstKeepAliveTimeout == time.Second {
+ l.firstKeepAliveTimeout = defaultTTL
+ }
+ if c != nil {
+ l.callOpts = c.callOpts
+ }
+ reqLeaderCtx := WithRequireLeader(context.Background())
+ l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
+ return l
+}
+
+func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
+ r := &pb.LeaseGrantRequest{TTL: ttl}
+ fmt.Println("lease--->:", *r)
+ resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
+ if err == nil {
+ gresp := &LeaseGrantResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ Error: resp.Error,
+ }
+ return gresp, nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
+ r := &pb.LeaseRevokeRequest{ID: int64(id)}
+ resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
+ if err == nil {
+ return (*LeaseRevokeResponse)(resp), nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
+ r := toLeaseTimeToLiveRequest(id, opts...)
+ resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ gresp := &LeaseTimeToLiveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ GrantedTTL: resp.GrantedTTL,
+ Keys: resp.Keys,
+ }
+ return gresp, nil
+}
+
+func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
+ resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
+ if err == nil {
+ leases := make([]LeaseStatus, len(resp.Leases))
+ for i := range resp.Leases {
+ leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
+ }
+ return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+// KeepAlive attempts to keep the given lease alive forever.
+func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
+ ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
+
+ l.mu.Lock()
+ // ensure that recvKeepAliveLoop is still running
+ select {
+ case <-l.donec:
+ err := l.loopErr
+ l.mu.Unlock()
+ close(ch)
+ return ch, ErrKeepAliveHalted{Reason: err}
+ default:
+ }
+ ka, ok := l.keepAlives[id]
+ if !ok {
+ // create fresh keep alive
+ ka = &keepAlive{
+ chs: []chan<- *LeaseKeepAliveResponse{ch},
+ ctxs: []context.Context{ctx},
+ deadline: time.Now().Add(l.firstKeepAliveTimeout),
+ nextKeepAlive: time.Now(),
+ donec: make(chan struct{}),
+ }
+ l.keepAlives[id] = ka
+ } else {
+ // add channel and context to existing keep alive
+ ka.ctxs = append(ka.ctxs, ctx)
+ ka.chs = append(ka.chs, ch)
+ }
+ l.mu.Unlock()
+
+ go l.keepAliveCtxCloser(ctx, id, ka.donec)
+ l.firstKeepAliveOnce.Do(func() {
+ // keep receiving keep-alive responses, retrying every 500ms on failure
+ go l.recvKeepAliveLoop()
+ // reap leases that have waited too long without a response
+ go l.deadlineLoop()
+ })
+
+ return ch, nil
+}
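+
+// Usage sketch (illustrative; 'cli' is an assumed *Client, and the key, value and
+// 10s TTL are assumptions):
+//
+//	grant, err := cli.Grant(ctx, 10)
+//	if err != nil { /* handle error */ }
+//	_, err = cli.Put(ctx, "foo", "bar", clientv3.WithLease(grant.ID))
+//	ch, err := cli.KeepAlive(ctx, grant.ID)
+//	for ka := range ch {
+//		_ = ka.TTL // remaining TTL after each successful renewal
+//	}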
+
+func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+ for {
+ resp, err := l.keepAliveOnce(ctx, id)
+ if err == nil {
+ if resp.TTL <= 0 {
+ err = rpctypes.ErrLeaseNotFound
+ }
+ return resp, err
+ }
+ if isHaltErr(ctx, err) {
+ return nil, toErr(ctx, err)
+ }
+ }
+}
+
+func (l *lessor) Close() error {
+ l.stopCancel()
+ // close for synchronous teardown if stream goroutines never launched
+ l.firstKeepAliveOnce.Do(func() { close(l.donec) })
+ <-l.donec
+ return nil
+}
+
+func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
+ select {
+ case <-donec:
+ return
+ case <-l.donec:
+ return
+ case <-ctx.Done():
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ ka, ok := l.keepAlives[id]
+ if !ok {
+ return
+ }
+
+ // close channel and remove context if still associated with keep alive
+ for i, c := range ka.ctxs {
+ if c == ctx {
+ close(ka.chs[i])
+ ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
+ ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
+ break
+ }
+ }
+ // remove the keep alive if there are no more listeners
+ if len(ka.chs) == 0 {
+ delete(l.keepAlives, id)
+ }
+}
+
+// closeRequireLeader scans keepAlives for ctxs that have require leader
+// and closes the associated channels.
+func (l *lessor) closeRequireLeader() {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ for _, ka := range l.keepAlives {
+ reqIdxs := 0
+ // find all required leader channels, close, mark as nil
+ for i, ctx := range ka.ctxs {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ continue
+ }
+ ks := md[rpctypes.MetadataRequireLeaderKey]
+ if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+ continue
+ }
+ close(ka.chs[i])
+ ka.chs[i] = nil
+ reqIdxs++
+ }
+ if reqIdxs == 0 {
+ continue
+ }
+ // remove all channels that required a leader from keepalive
+ newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+ newCtxs := make([]context.Context, len(newChs))
+ newIdx := 0
+ for i := range ka.chs {
+ if ka.chs[i] == nil {
+ continue
+ }
+ // keep each surviving channel paired with its own context (same index i)
+ newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
+ newIdx++
+ }
+ ka.chs, ka.ctxs = newChs, newCtxs
+ }
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+
+ err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+
+ resp, rerr := stream.Recv()
+ if rerr != nil {
+ return nil, toErr(ctx, rerr)
+ }
+
+ karesp := &LeaseKeepAliveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ }
+ return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+ defer func() {
+ l.mu.Lock()
+ close(l.donec)
+ l.loopErr = gerr
+ for _, ka := range l.keepAlives {
+ ka.close()
+ }
+ l.keepAlives = make(map[LeaseID]*keepAlive)
+ l.mu.Unlock()
+ }()
+
+ for {
+ stream, err := l.resetRecv()
+ if err != nil {
+ if canceledByCaller(l.stopCtx, err) {
+ return err
+ }
+ } else {
+ for {
+ // receive keep-alive responses on the stream opened by resetRecv
+ resp, err := stream.Recv()
+ if err != nil {
+ if canceledByCaller(l.stopCtx, err) {
+ return err
+ }
+
+ if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
+ l.closeRequireLeader()
+ }
+ break
+ }
+ // update the lease based on the LeaseKeepAliveResponse;
+ // if the lease expired, close all of its keep-alive channels
+ l.recvKeepAlive(resp)
+ }
+ }
+
+ select {
+ case <-time.After(retryConnWait):
+ case <-l.stopCtx.Done():
+ return l.stopCtx.Err()
+ }
+ }
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
+ sctx, cancel := context.WithCancel(l.stopCtx)
+ // establish the lease keep-alive stream between the client and the server
+ stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.stream != nil && l.streamCancel != nil {
+ l.streamCancel()
+ }
+
+ l.streamCancel = cancel
+ l.stream = stream
+
+ go l.sendKeepAliveLoop(stream)
+ return stream, nil
+}
+
+// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
+func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
+ karesp := &LeaseKeepAliveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ ka, ok := l.keepAlives[karesp.ID]
+ if !ok {
+ return
+ }
+
+ if karesp.TTL <= 0 {
+ // lease expired; close all keep alive channels
+ delete(l.keepAlives, karesp.ID)
+ ka.close()
+ return
+ }
+
+ // send update to all channels
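+ // the next keepalive is scheduled a third of the TTL out, e.g. a lease with a
+ // 9s TTL is renewed roughly every 3 seconds while its channel keeps draining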
+ nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
+ ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
+ for _, ch := range ka.chs {
+ select {
+ case ch <- karesp:
+ default:
+ if l.lg != nil {
+ l.lg.Warn("lease keepalive response queue is full; dropping response send",
+ zap.Int("queue-size", len(ch)),
+ zap.Int("queue-capacity", cap(ch)),
+ )
+ }
+ }
+ // still advance in order to rate-limit keep-alive sends
+ ka.nextKeepAlive = nextKeepAlive
+ }
+}
+
+// deadlineLoop reaps any keep alive channels that have not received a response
+// within the lease TTL
+func (l *lessor) deadlineLoop() {
+ for {
+ select {
+ case <-time.After(time.Second):
+ case <-l.donec:
+ return
+ }
+ now := time.Now()
+ l.mu.Lock()
+ for id, ka := range l.keepAlives {
+ if ka.deadline.Before(now) {
+ // waited too long for response; lease may be expired
+ ka.close()
+ delete(l.keepAlives, id)
+ }
+ }
+ l.mu.Unlock()
+ }
+}
+
+// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
+func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
+ for {
+ var tosend []LeaseID
+
+ now := time.Now()
+ l.mu.Lock()
+ for id, ka := range l.keepAlives {
+ if ka.nextKeepAlive.Before(now) {
+ tosend = append(tosend, id)
+ }
+ }
+ l.mu.Unlock()
+
+ for _, id := range tosend {
+ r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
+ if err := stream.Send(r); err != nil {
+ // TODO do something with this error?
+ return
+ }
+ }
+
+ select {
+ case <-time.After(retryConnWait):
+ case <-stream.Context().Done():
+ return
+ case <-l.donec:
+ return
+ case <-l.stopCtx.Done():
+ return
+ }
+ }
+}
+
+func (ka *keepAlive) close() {
+ close(ka.donec)
+ for _, ch := range ka.chs {
+ close(ch)
+ }
+}
diff --git a/client/v3/leasing/cache.go b/client_sdk/v3/leasing/cache.go
similarity index 94%
rename from client/v3/leasing/cache.go
rename to client_sdk/v3/leasing/cache.go
index 214ee2fc196..a4215c6b98a 100644
--- a/client/v3/leasing/cache.go
+++ b/client_sdk/v3/leasing/cache.go
@@ -20,9 +20,9 @@ import (
"sync"
"time"
- v3pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ v3pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)
const revokeBackoff = 2 * time.Second
@@ -144,7 +144,7 @@ func (lc *leaseCache) Update(key, val []byte, respHeader *v3pb.ResponseHeader) {
cacheResp := li.response
if len(cacheResp.Kvs) == 0 {
kv := &mvccpb.KeyValue{
- Key: key,
+ Key: string(key),
CreateRevision: respHeader.Revision,
}
cacheResp.Kvs = append(cacheResp.Kvs, kv)
@@ -154,7 +154,7 @@ func (lc *leaseCache) Update(key, val []byte, respHeader *v3pb.ResponseHeader) {
if cacheResp.Kvs[0].ModRevision < respHeader.Revision {
cacheResp.Header = respHeader
cacheResp.Kvs[0].ModRevision = respHeader.Revision
- cacheResp.Kvs[0].Value = val
+ cacheResp.Kvs[0].Value = string(val)
}
}
@@ -228,11 +228,9 @@ func (lk *leaseKey) get(op v3.Op) *v3.GetResponse {
ret.Kvs = nil
} else {
kv := *ret.Kvs[0]
- kv.Key = make([]byte, len(kv.Key))
- copy(kv.Key, ret.Kvs[0].Key)
+ kv.Key = ret.Kvs[0].Key
if !op.IsKeysOnly() {
- kv.Value = make([]byte, len(kv.Value))
- copy(kv.Value, ret.Kvs[0].Value)
+ kv.Value = ret.Kvs[0].Value
}
ret.Kvs = []*mvccpb.KeyValue{&kv}
}
@@ -297,7 +295,7 @@ func (lc *leaseCache) evalOps(ops []v3.Op) ([]*v3pb.ResponseOp, bool) {
return nil, false
}
resps[i] = &v3pb.ResponseOp{
- Response: &v3pb.ResponseOp_ResponseRange{
+ ResponseOp_ResponseRange: &v3pb.ResponseOp_ResponseRange{
ResponseRange: (*v3pb.RangeResponse)(resp),
},
}
diff --git a/client_sdk/v3/leasing/doc.go b/client_sdk/v3/leasing/doc.go
new file mode 100644
index 00000000000..5ffb8b70c21
--- /dev/null
+++ b/client_sdk/v3/leasing/doc.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package leasing serves linearizable reads from a local cache by acquiring
+// exclusive write access to keys through a client-side leasing protocol. This
+// leasing layer can either directly wrap the etcd client or it can be exposed
+// through the etcd grpc proxy server, granting multiple clients write access.
+//
+// First, create a leasing KV from a clientv3.Client 'cli':
+//
+// lkv, closeLkv, err := leasing.NewKV(cli, "leasing-prefix")
+// if err != nil {
+// // handle error
+// }
+//
+// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's
+// key locally. On the server, the leasing key is stored to "leasing-prefix/abc":
+//
+// resp, err := lkv.Get(context.TODO(), "abc")
+//
+// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime:
+//
+// resp, err = lkv.Get(context.TODO(), "abc")
+//
+// If another leasing client writes to a leased key, then the owner relinquishes its exclusive
+// access, permitting the writer to modify the key:
+//
+// lkv2, closeLkv2, err := leasing.NewKV(cli, "leasing-prefix")
+// if err != nil {
+// // handle error
+// }
+// lkv2.Put(context.TODO(), "abc", "456")
+// resp, err = lkv.Get("abc")
+//
+package leasing
diff --git a/client_sdk/v3/leasing/kv.go b/client_sdk/v3/leasing/kv.go
new file mode 100644
index 00000000000..35022b1274d
--- /dev/null
+++ b/client_sdk/v3/leasing/kv.go
@@ -0,0 +1,479 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasing
+
+import (
+ "context"
+ "strings"
+ "sync"
+ "time"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+type leasingKV struct {
+ cl *v3.Client
+ kv v3.KV
+ pfx string
+ leases leaseCache
+
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ sessionOpts []concurrency.SessionOption
+ session *concurrency.Session
+ sessionc chan struct{}
+}
+
+var closedCh chan struct{}
+
+func init() {
+ closedCh = make(chan struct{})
+ close(closedCh)
+}
+
+// NewKV wraps a KV instance so that all requests are wired through a leasing protocol.
+func NewKV(cl *v3.Client, pfx string, opts ...concurrency.SessionOption) (v3.KV, func(), error) {
+ cctx, cancel := context.WithCancel(cl.Ctx())
+ lkv := &leasingKV{
+ cl: cl,
+ kv: cl.KV,
+ pfx: pfx,
+ leases: leaseCache{revokes: make(map[string]time.Time)},
+ ctx: cctx,
+ cancel: cancel,
+ sessionOpts: opts,
+ sessionc: make(chan struct{}),
+ }
+ lkv.wg.Add(2)
+ go func() {
+ defer lkv.wg.Done()
+ lkv.monitorSession()
+ }()
+ go func() {
+ defer lkv.wg.Done()
+ lkv.leases.clearOldRevokes(cctx)
+ }()
+ return lkv, lkv.Close, lkv.waitSession(cctx)
+}
+
+func (lkv *leasingKV) Close() {
+ lkv.cancel()
+ lkv.wg.Wait()
+}
+
+func (lkv *leasingKV) Get(ctx context.Context, key string, opts ...v3.OpOption) (*v3.GetResponse, error) {
+ return lkv.get(ctx, v3.OpGet(key, opts...))
+}
+
+func (lkv *leasingKV) Put(ctx context.Context, key, val string, opts ...v3.OpOption) (*v3.PutResponse, error) {
+ return lkv.put(ctx, v3.OpPut(key, val, opts...))
+}
+
+func (lkv *leasingKV) Delete(ctx context.Context, key string, opts ...v3.OpOption) (*v3.DeleteResponse, error) {
+ return lkv.delete(ctx, v3.OpDelete(key, opts...))
+}
+
+func (lkv *leasingKV) Do(ctx context.Context, op v3.Op) (v3.OpResponse, error) {
+ switch {
+ case op.IsGet():
+ resp, err := lkv.get(ctx, op)
+ return resp.OpResponse(), err
+ case op.IsPut():
+ resp, err := lkv.put(ctx, op)
+ return resp.OpResponse(), err
+ case op.IsDelete():
+ resp, err := lkv.delete(ctx, op)
+ return resp.OpResponse(), err
+ case op.IsTxn():
+ cmps, thenOps, elseOps := op.Txn()
+ resp, err := lkv.Txn(ctx).If(cmps...).Then(thenOps...).Else(elseOps...).Commit()
+ return resp.OpResponse(), err
+ }
+ return v3.OpResponse{}, nil
+}
+
+func (lkv *leasingKV) Compact(ctx context.Context, rev int64, opts ...v3.CompactOption) (*v3.CompactResponse, error) {
+ return lkv.kv.Compact(ctx, rev, opts...)
+}
+
+func (lkv *leasingKV) Txn(ctx context.Context) v3.Txn {
+ return &txnLeasing{Txn: lkv.kv.Txn(ctx), lkv: lkv, ctx: ctx}
+}
+
+func (lkv *leasingKV) monitorSession() {
+ for lkv.ctx.Err() == nil {
+ if lkv.session != nil {
+ select {
+ case <-lkv.session.Done():
+ case <-lkv.ctx.Done():
+ return
+ }
+ }
+ lkv.leases.mu.Lock()
+ select {
+ case <-lkv.sessionc:
+ lkv.sessionc = make(chan struct{})
+ default:
+ }
+ lkv.leases.entries = make(map[string]*leaseKey)
+ lkv.leases.mu.Unlock()
+
+ s, err := concurrency.NewSession(lkv.cl, lkv.sessionOpts...)
+ if err != nil {
+ continue
+ }
+
+ lkv.leases.mu.Lock()
+ lkv.session = s
+ close(lkv.sessionc)
+ lkv.leases.mu.Unlock()
+ }
+}
+
+func (lkv *leasingKV) monitorLease(ctx context.Context, key string, rev int64) {
+ cctx, cancel := context.WithCancel(lkv.ctx)
+ defer cancel()
+ for cctx.Err() == nil {
+ if rev == 0 {
+ resp, err := lkv.kv.Get(ctx, lkv.pfx+key)
+ if err != nil {
+ continue
+ }
+ rev = resp.Header.Revision
+ if len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) == "REVOKE" {
+ lkv.rescind(cctx, key, rev)
+ return
+ }
+ }
+ wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
+ for resp := range wch {
+ for _, ev := range resp.Events {
+ if string(ev.Kv.Value) != "REVOKE" {
+ continue
+ }
+ if v3.LeaseID(ev.Kv.Lease) == lkv.leaseID() {
+ lkv.rescind(cctx, key, ev.Kv.ModRevision)
+ }
+ return
+ }
+ }
+ rev = 0
+ }
+}
+
+// rescind releases a lease from this client.
+func (lkv *leasingKV) rescind(ctx context.Context, key string, rev int64) {
+ if lkv.leases.Evict(key) > rev {
+ return
+ }
+ cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev)
+ op := v3.OpDelete(lkv.pfx + key)
+ for ctx.Err() == nil {
+ if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil {
+ return
+ }
+ }
+}
+
+func (lkv *leasingKV) waitRescind(ctx context.Context, key string, rev int64) error {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
+ for resp := range wch {
+ for _, ev := range resp.Events {
+ if ev.Type == v3.EventTypeDelete {
+ return ctx.Err()
+ }
+ }
+ }
+ return ctx.Err()
+}
+
+func (lkv *leasingKV) tryModifyOp(ctx context.Context, op v3.Op) (*v3.TxnResponse, chan<- struct{}, error) {
+ key := string(op.KeyBytes())
+ wc, rev := lkv.leases.Lock(key)
+ cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)
+ resp, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit()
+ switch {
+ case err != nil:
+ lkv.leases.Evict(key)
+ fallthrough
+ case !resp.Succeeded:
+ if wc != nil {
+ close(wc)
+ }
+ return nil, nil, err
+ }
+ return resp, wc, nil
+}
+
+func (lkv *leasingKV) put(ctx context.Context, op v3.Op) (pr *v3.PutResponse, err error) {
+ if err := lkv.waitSession(ctx); err != nil {
+ return nil, err
+ }
+ for ctx.Err() == nil {
+ resp, wc, err := lkv.tryModifyOp(ctx, op)
+ if err != nil || wc == nil {
+ resp, err = lkv.revoke(ctx, string(op.KeyBytes()), op)
+ }
+ if err != nil {
+ return nil, err
+ }
+ if resp.Succeeded {
+ lkv.leases.mu.Lock()
+ lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), resp.Header)
+ lkv.leases.mu.Unlock()
+ pr = (*v3.PutResponse)(resp.Responses[0].GetResponsePut())
+ pr.Header = resp.Header
+ }
+ if wc != nil {
+ close(wc)
+ }
+ if resp.Succeeded {
+ return pr, nil
+ }
+ }
+ return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) acquire(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
+ for ctx.Err() == nil {
+ if err := lkv.waitSession(ctx); err != nil {
+ return nil, err
+ }
+ lcmp := v3.Cmp{Key: key, Target: pb.Compare_LEASE}
+ resp, err := lkv.kv.Txn(ctx).If(
+ v3.Compare(v3.CreateRevision(lkv.pfx+key), "=", 0),
+ v3.Compare(lcmp, "=", 0)).
+ Then(
+ op,
+ v3.OpPut(lkv.pfx+key, "", v3.WithLease(lkv.leaseID()))).
+ Else(
+ op,
+ v3.OpGet(lkv.pfx+key),
+ ).Commit()
+ if err == nil {
+ if !resp.Succeeded {
+ kvs := resp.Responses[1].GetResponseRange().Kvs
+ // if txn failed since already owner, lease is acquired
+ resp.Succeeded = len(kvs) > 0 && v3.LeaseID(kvs[0].Lease) == lkv.leaseID()
+ }
+ return resp, nil
+ }
+ // retry if transient error
+ if _, ok := err.(rpctypes.EtcdError); ok {
+ return nil, err
+ }
+ if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
+ return nil, err
+ }
+ }
+ return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) get(ctx context.Context, op v3.Op) (*v3.GetResponse, error) {
+ do := func() (*v3.GetResponse, error) {
+ r, err := lkv.kv.Do(ctx, op)
+ return r.Get(), err
+ }
+ if !lkv.readySession() {
+ return do()
+ }
+
+ if resp, ok := lkv.leases.Get(ctx, op); resp != nil {
+ return resp, nil
+ } else if !ok || op.IsSerializable() {
+ // must be handled by the server, or linearization can be skipped
+ return do()
+ }
+
+ key := string(op.KeyBytes())
+ if !lkv.leases.MayAcquire(key) {
+ resp, err := lkv.kv.Do(ctx, op)
+ return resp.Get(), err
+ }
+
+ resp, err := lkv.acquire(ctx, key, v3.OpGet(key))
+ if err != nil {
+ return nil, err
+ }
+ getResp := (*v3.GetResponse)(resp.Responses[0].GetResponseRange())
+ getResp.Header = resp.Header
+ if resp.Succeeded {
+ getResp = lkv.leases.Add(key, getResp, op)
+ lkv.wg.Add(1)
+ go func() {
+ defer lkv.wg.Done()
+ lkv.monitorLease(ctx, key, resp.Header.Revision)
+ }()
+ }
+ return getResp, nil
+}
+
+func (lkv *leasingKV) deleteRangeRPC(ctx context.Context, maxLeaseRev int64, key, end string) (*v3.DeleteResponse, error) {
+ lkey, lend := lkv.pfx+key, lkv.pfx+end
+ resp, err := lkv.kv.Txn(ctx).If(
+ v3.Compare(v3.CreateRevision(lkey).WithRange(lend), "<", maxLeaseRev+1),
+ ).Then(
+ v3.OpGet(key, v3.WithRange(end), v3.WithKeysOnly()),
+ v3.OpDelete(key, v3.WithRange(end)),
+ ).Commit()
+ if err != nil {
+ lkv.leases.EvictRange(key, end)
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, nil
+ }
+ for _, kv := range resp.Responses[0].GetResponseRange().Kvs {
+ lkv.leases.Delete(string(kv.Key), resp.Header)
+ }
+ delResp := (*v3.DeleteResponse)(resp.Responses[1].GetResponseDeleteRange())
+ delResp.Header = resp.Header
+ return delResp, nil
+}
+
+func (lkv *leasingKV) deleteRange(ctx context.Context, op v3.Op) (*v3.DeleteResponse, error) {
+ key, end := string(op.KeyBytes()), string(op.RangeBytes())
+ for ctx.Err() == nil {
+ maxLeaseRev, err := lkv.revokeRange(ctx, key, end)
+ if err != nil {
+ return nil, err
+ }
+ wcs := lkv.leases.LockRange(key, end)
+ delResp, err := lkv.deleteRangeRPC(ctx, maxLeaseRev, key, end)
+ closeAll(wcs)
+ if err != nil || delResp != nil {
+ return delResp, err
+ }
+ }
+ return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) delete(ctx context.Context, op v3.Op) (dr *v3.DeleteResponse, err error) {
+ if err := lkv.waitSession(ctx); err != nil {
+ return nil, err
+ }
+ if len(op.RangeBytes()) > 0 {
+ return lkv.deleteRange(ctx, op)
+ }
+ key := string(op.KeyBytes())
+ for ctx.Err() == nil {
+ resp, wc, err := lkv.tryModifyOp(ctx, op)
+ if err != nil || wc == nil {
+ resp, err = lkv.revoke(ctx, key, op)
+ }
+ if err != nil {
+ // don't know if delete was processed
+ lkv.leases.Evict(key)
+ return nil, err
+ }
+ if resp.Succeeded {
+ dr = (*v3.DeleteResponse)(resp.Responses[0].GetResponseDeleteRange())
+ dr.Header = resp.Header
+ lkv.leases.Delete(key, dr.Header)
+ }
+ if wc != nil {
+ close(wc)
+ }
+ if resp.Succeeded {
+ return dr, nil
+ }
+ }
+ return nil, ctx.Err()
+}
+
+func (lkv *leasingKV) revoke(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
+ rev := lkv.leases.Rev(key)
+ txn := lkv.kv.Txn(ctx).If(v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)).Then(op)
+ resp, err := txn.Else(v3.OpPut(lkv.pfx+key, "REVOKE", v3.WithIgnoreLease())).Commit()
+ if err != nil || resp.Succeeded {
+ return resp, err
+ }
+ return resp, lkv.waitRescind(ctx, key, resp.Header.Revision)
+}
+
+func (lkv *leasingKV) revokeRange(ctx context.Context, begin, end string) (int64, error) {
+ lkey, lend := lkv.pfx+begin, ""
+ if len(end) > 0 {
+ lend = lkv.pfx + end
+ }
+ leaseKeys, err := lkv.kv.Get(ctx, lkey, v3.WithRange(lend))
+ if err != nil {
+ return 0, err
+ }
+ return lkv.revokeLeaseKvs(ctx, leaseKeys.Kvs)
+}
+
+func (lkv *leasingKV) revokeLeaseKvs(ctx context.Context, kvs []*mvccpb.KeyValue) (int64, error) {
+ maxLeaseRev := int64(0)
+ for _, kv := range kvs {
+ if rev := kv.CreateRevision; rev > maxLeaseRev {
+ maxLeaseRev = rev
+ }
+ if v3.LeaseID(kv.Lease) == lkv.leaseID() {
+ // don't revoke own keys
+ continue
+ }
+ key := strings.TrimPrefix(string(kv.Key), lkv.pfx)
+ if _, err := lkv.revoke(ctx, key, v3.OpGet(key)); err != nil {
+ return 0, err
+ }
+ }
+ return maxLeaseRev, nil
+}
+
+func (lkv *leasingKV) waitSession(ctx context.Context) error {
+ lkv.leases.mu.RLock()
+ sessionc := lkv.sessionc
+ lkv.leases.mu.RUnlock()
+ select {
+ case <-sessionc:
+ return nil
+ case <-lkv.ctx.Done():
+ return lkv.ctx.Err()
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (lkv *leasingKV) readySession() bool {
+ lkv.leases.mu.RLock()
+ defer lkv.leases.mu.RUnlock()
+ if lkv.session == nil {
+ return false
+ }
+ select {
+ case <-lkv.session.Done():
+ default:
+ return true
+ }
+ return false
+}
+
+func (lkv *leasingKV) leaseID() v3.LeaseID {
+ lkv.leases.mu.RLock()
+ defer lkv.leases.mu.RUnlock()
+ return lkv.session.Lease()
+}
diff --git a/client_sdk/v3/leasing/txn.go b/client_sdk/v3/leasing/txn.go
new file mode 100644
index 00000000000..299331e1f01
--- /dev/null
+++ b/client_sdk/v3/leasing/txn.go
@@ -0,0 +1,223 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasing
+
+import (
+ "context"
+ "strings"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ v3pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type txnLeasing struct {
+ v3.Txn
+ lkv *leasingKV
+ ctx context.Context
+ cs []v3.Cmp
+ opst []v3.Op
+ opse []v3.Op
+}
+
+func (txn *txnLeasing) If(cs ...v3.Cmp) v3.Txn {
+ txn.cs = append(txn.cs, cs...)
+ txn.Txn = txn.Txn.If(cs...)
+ return txn
+}
+
+func (txn *txnLeasing) Then(ops ...v3.Op) v3.Txn {
+ txn.opst = append(txn.opst, ops...)
+ txn.Txn = txn.Txn.Then(ops...)
+ return txn
+}
+
+func (txn *txnLeasing) Else(ops ...v3.Op) v3.Txn {
+ txn.opse = append(txn.opse, ops...)
+ txn.Txn = txn.Txn.Else(ops...)
+ return txn
+}
+
+func (txn *txnLeasing) Commit() (*v3.TxnResponse, error) {
+ if resp, err := txn.eval(); resp != nil || err != nil {
+ return resp, err
+ }
+ return txn.serverTxn()
+}
+
+func (txn *txnLeasing) eval() (*v3.TxnResponse, error) {
+ // TODO: wait on keys in comparisons
+ thenOps, elseOps := gatherOps(txn.opst), gatherOps(txn.opse)
+ ops := make([]v3.Op, 0, len(thenOps)+len(elseOps))
+ ops = append(ops, thenOps...)
+ ops = append(ops, elseOps...)
+
+ for _, ch := range txn.lkv.leases.NotifyOps(ops) {
+ select {
+ case <-ch:
+ case <-txn.ctx.Done():
+ return nil, txn.ctx.Err()
+ }
+ }
+
+ txn.lkv.leases.mu.RLock()
+ defer txn.lkv.leases.mu.RUnlock()
+ succeeded, ok := txn.lkv.leases.evalCmp(txn.cs)
+ if !ok || txn.lkv.leases.header == nil {
+ return nil, nil
+ }
+ if ops = txn.opst; !succeeded {
+ ops = txn.opse
+ }
+
+ resps, ok := txn.lkv.leases.evalOps(ops)
+ if !ok {
+ return nil, nil
+ }
+ return &v3.TxnResponse{Header: copyHeader(txn.lkv.leases.header), Succeeded: succeeded, Responses: resps}, nil
+}
+
+// fallback computes the ops to fetch all possible conflicting
+// leasing keys for a list of ops.
+func (txn *txnLeasing) fallback(ops []v3.Op) (fbOps []v3.Op) {
+ for _, op := range ops {
+ if op.IsGet() {
+ continue
+ }
+ lkey, lend := txn.lkv.pfx+string(op.KeyBytes()), ""
+ if len(op.RangeBytes()) > 0 {
+ lend = txn.lkv.pfx + string(op.RangeBytes())
+ }
+ fbOps = append(fbOps, v3.OpGet(lkey, v3.WithRange(lend)))
+ }
+ return fbOps
+}
+
+func (txn *txnLeasing) guardKeys(ops []v3.Op) (cmps []v3.Cmp) {
+ seen := make(map[string]bool)
+ for _, op := range ops {
+ key := string(op.KeyBytes())
+ if op.IsGet() || len(op.RangeBytes()) != 0 || seen[key] {
+ continue
+ }
+ rev := txn.lkv.leases.Rev(key)
+ cmps = append(cmps, v3.Compare(v3.CreateRevision(txn.lkv.pfx+key), "<", rev+1))
+ seen[key] = true
+ }
+ return cmps
+}
+
+func (txn *txnLeasing) guardRanges(ops []v3.Op) (cmps []v3.Cmp, err error) {
+ for _, op := range ops {
+ if op.IsGet() || len(op.RangeBytes()) == 0 {
+ continue
+ }
+
+ key, end := string(op.KeyBytes()), string(op.RangeBytes())
+ maxRevLK, err := txn.lkv.revokeRange(txn.ctx, key, end)
+ if err != nil {
+ return nil, err
+ }
+
+ opts := append(v3.WithLastRev(), v3.WithRange(end))
+ getResp, err := txn.lkv.kv.Get(txn.ctx, key, opts...)
+ if err != nil {
+ return nil, err
+ }
+ maxModRev := int64(0)
+ if len(getResp.Kvs) > 0 {
+ maxModRev = getResp.Kvs[0].ModRevision
+ }
+
+ noKeyUpdate := v3.Compare(v3.ModRevision(key).WithRange(end), "<", maxModRev+1)
+ noLeaseUpdate := v3.Compare(
+ v3.CreateRevision(txn.lkv.pfx+key).WithRange(txn.lkv.pfx+end),
+ "<",
+ maxRevLK+1)
+ cmps = append(cmps, noKeyUpdate, noLeaseUpdate)
+ }
+ return cmps, nil
+}
+
+func (txn *txnLeasing) guard(ops []v3.Op) ([]v3.Cmp, error) {
+ cmps := txn.guardKeys(ops)
+ rangeCmps, err := txn.guardRanges(ops)
+ return append(cmps, rangeCmps...), err
+}
+
+func (txn *txnLeasing) commitToCache(txnResp *v3pb.TxnResponse, userTxn v3.Op) {
+ ops := gatherResponseOps(txnResp.Responses, []v3.Op{userTxn})
+ txn.lkv.leases.mu.Lock()
+ for _, op := range ops {
+ key := string(op.KeyBytes())
+ if op.IsDelete() && len(op.RangeBytes()) > 0 {
+ end := string(op.RangeBytes())
+ for k := range txn.lkv.leases.entries {
+ if inRange(k, key, end) {
+ txn.lkv.leases.delete(k, txnResp.Header)
+ }
+ }
+ } else if op.IsDelete() {
+ txn.lkv.leases.delete(key, txnResp.Header)
+ }
+ if op.IsPut() {
+ txn.lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), txnResp.Header)
+ }
+ }
+ txn.lkv.leases.mu.Unlock()
+}
+
+func (txn *txnLeasing) revokeFallback(fbResps []*v3pb.ResponseOp) error {
+ for _, resp := range fbResps {
+ _, err := txn.lkv.revokeLeaseKvs(txn.ctx, resp.GetResponseRange().Kvs)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (txn *txnLeasing) serverTxn() (*v3.TxnResponse, error) {
+ if err := txn.lkv.waitSession(txn.ctx); err != nil {
+ return nil, err
+ }
+
+ userOps := gatherOps(append(txn.opst, txn.opse...))
+ userTxn := v3.OpTxn(txn.cs, txn.opst, txn.opse)
+ fbOps := txn.fallback(userOps)
+
+ defer closeAll(txn.lkv.leases.LockWriteOps(userOps))
+ for {
+ cmps, err := txn.guard(userOps)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := txn.lkv.kv.Txn(txn.ctx).If(cmps...).Then(userTxn).Else(fbOps...).Commit()
+ if err != nil {
+ for _, cmp := range cmps {
+ txn.lkv.leases.Evict(strings.TrimPrefix(string(cmp.Key), txn.lkv.pfx))
+ }
+ return nil, err
+ }
+ if resp.Succeeded {
+ txn.commitToCache((*v3pb.TxnResponse)(resp), userTxn)
+ userResp := resp.Responses[0].GetResponseTxn()
+ userResp.Header = resp.Header
+ return (*v3.TxnResponse)(userResp), nil
+ }
+ if err := txn.revokeFallback(resp.Responses); err != nil {
+ return nil, err
+ }
+ }
+}
diff --git a/client_sdk/v3/leasing/util.go b/client_sdk/v3/leasing/util.go
new file mode 100644
index 00000000000..030ee59b020
--- /dev/null
+++ b/client_sdk/v3/leasing/util.go
@@ -0,0 +1,108 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasing
+
+import (
+ "bytes"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ v3pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+func compareInt64(a, b int64) int {
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+}
+
+func evalCmp(resp *v3.GetResponse, tcmp v3.Cmp) bool {
+ var result int
+ if len(resp.Kvs) != 0 {
+ kv := resp.Kvs[0]
+ switch tcmp.Target {
+ case v3pb.Compare_VALUE:
+ if tcmp.Compare_Value != nil {
+ result = bytes.Compare([]byte(kv.Value), []byte(tcmp.Compare_Value.Value))
+ }
+ case v3pb.Compare_CREATE:
+ if tcmp.Compare_CreateRevision != nil {
+ result = compareInt64(kv.CreateRevision, tcmp.Compare_CreateRevision.CreateRevision)
+ }
+ case v3pb.Compare_MOD:
+ if tcmp.Compare_ModRevision != nil {
+ result = compareInt64(kv.ModRevision, tcmp.Compare_ModRevision.ModRevision)
+ }
+ case v3pb.Compare_VERSION:
+ if tcmp.Compare_Version != nil {
+ result = compareInt64(kv.Version, tcmp.Compare_Version.Version)
+ }
+ }
+ }
+ switch tcmp.Result {
+ case v3pb.Compare_EQUAL:
+ return result == 0
+ case v3pb.Compare_NOT_EQUAL:
+ return result != 0
+ case v3pb.Compare_GREATER:
+ return result > 0
+ case v3pb.Compare_LESS:
+ return result < 0
+ }
+ return true
+}
+
+func gatherOps(ops []v3.Op) (ret []v3.Op) {
+ for _, op := range ops {
+ if !op.IsTxn() {
+ ret = append(ret, op)
+ continue
+ }
+ _, thenOps, elseOps := op.Txn()
+ ret = append(ret, gatherOps(append(thenOps, elseOps...))...)
+ }
+ return ret
+}
+
+func gatherResponseOps(resp []*v3pb.ResponseOp, ops []v3.Op) (ret []v3.Op) {
+ for i, op := range ops {
+ if !op.IsTxn() {
+ ret = append(ret, op)
+ continue
+ }
+ _, thenOps, elseOps := op.Txn()
+ if txnResp := resp[i].GetResponseTxn(); txnResp.Succeeded {
+ ret = append(ret, gatherResponseOps(txnResp.Responses, thenOps)...)
+ } else {
+ ret = append(ret, gatherResponseOps(txnResp.Responses, elseOps)...)
+ }
+ }
+ return ret
+}
+
+func copyHeader(hdr *v3pb.ResponseHeader) *v3pb.ResponseHeader {
+ h := *hdr
+ return &h
+}
+
+func closeAll(chs []chan<- struct{}) {
+ for _, ch := range chs {
+ close(ch)
+ }
+}
diff --git a/client_sdk/v3/logger.go b/client_sdk/v3/logger.go
new file mode 100644
index 00000000000..ba79ca36632
--- /dev/null
+++ b/client_sdk/v3/logger.go
@@ -0,0 +1,77 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "log"
+ "os"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zapgrpc"
+ "google.golang.org/grpc/grpclog"
+)
+
+func init() {
+ // We override grpc logger only when the environment variable is set
+ // in order to not interfere by default with user's code or other libraries.
+ if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
+ lg, err := CreateDefaultZapLogger()
+ if err != nil {
+ panic(err)
+ }
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+ }
+}
+
+// SetLogger sets grpc logger.
+//
+// Deprecated: use grpclog.SetLoggerV2 directly or grpc_zap.ReplaceGrpcLoggerV2.
+func SetLogger(l grpclog.LoggerV2) {
+ grpclog.SetLoggerV2(l)
+}
+
+// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level.
+func etcdClientDebugLevel() zapcore.Level {
+ envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
+ if envLevel == "" || envLevel == "true" {
+ return zapcore.InfoLevel
+ }
+ var l zapcore.Level
+ if err := l.Set(envLevel); err != nil {
+ log.Printf("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
+ return zapcore.InfoLevel
+ }
+ return l
+}
+
+// CreateDefaultZapLoggerConfig creates a logger config that is configurable using env variable:
+// ETCD_CLIENT_DEBUG= debug|info|warn|error|dpanic|panic|fatal|true (true=info)
+func CreateDefaultZapLoggerConfig() zap.Config {
+ lcfg := logutil.DefaultZapLoggerConfig
+ lcfg.Level = zap.NewAtomicLevelAt(etcdClientDebugLevel())
+ return lcfg
+}
+
+// CreateDefaultZapLogger creates a logger that is configurable using env variable:
+// ETCD_CLIENT_DEBUG= debug|info|warn|error|dpanic|panic|fatal|true (true=info)
+func CreateDefaultZapLogger() (*zap.Logger, error) {
+ c, err := CreateDefaultZapLoggerConfig().Build()
+ if err != nil {
+ return nil, err
+ }
+ return c.Named("etcd-client"), nil
+}
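+
+// Example (illustrative): enable client-side debug logging via the environment
+// variable read above, then build the logger:
+//
+//	os.Setenv("ETCD_CLIENT_DEBUG", "debug")
+//	lg, err := CreateDefaultZapLogger()
+//	if err != nil { /* handle error */ }
+//	lg.Debug("etcd client debug logging enabled")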
diff --git a/client_sdk/v3/maintenance.go b/client_sdk/v3/maintenance.go
new file mode 100644
index 00000000000..a61456c8113
--- /dev/null
+++ b/client_sdk/v3/maintenance.go
@@ -0,0 +1,225 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+)
+
+type (
+ DefragmentResponse pb.DefragmentResponse
+ AlarmResponse pb.AlarmResponse
+ AlarmMember pb.AlarmMember
+ StatusResponse pb.StatusResponse
+ HashKVResponse pb.HashKVResponse
+ MoveLeaderResponse pb.MoveLeaderResponse
+)
+
+type Maintenance interface {
+ AlarmList(ctx context.Context) (*AlarmResponse, error) // lists all currently active alarms
+ AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) // disarms the given alarm
+ Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) // defragments the backend of the given endpoint
+ Status(ctx context.Context, endpoint string) (*StatusResponse, error) // returns the status of the given endpoint
+ HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) // returns the KV state hash of the endpoint at the given revision
+ Snapshot(ctx context.Context) (io.ReadCloser, error) // returns a reader streaming a backend snapshot
+ MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) // transfers leadership to the given member ID
+}
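+
+// Usage sketch (illustrative; 'cli' is an assumed *Client and the endpoint
+// address is an assumption):
+//
+//	m := clientv3.NewMaintenance(cli)
+//	st, err := m.Status(ctx, "127.0.0.1:2379")   // revision, db size, leader, ...
+//	if err != nil { /* handle error */ }
+//	_ = st
+//	_, err = m.Defragment(ctx, "127.0.0.1:2379") // reclaim backend space on that member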
+
+type maintenance struct {
+ lg *zap.Logger
+ dial func(endpoint string) (pb.MaintenanceClient, func(), error)
+ remote pb.MaintenanceClient
+ callOpts []grpc.CallOption
+}
+
+func NewMaintenance(c *Client) Maintenance {
+ api := &maintenance{
+ lg: c.lg,
+ dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+ conn, err := c.Dial(endpoint)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
+ }
+
+ // get token with established connection
+ dctx := c.ctx
+ cancel := func() {}
+ if c.cfg.DialTimeout > 0 {
+ dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+ }
+ err = c.getToken(dctx)
+ cancel()
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to getToken from endpoint %s with maintenance client: %v", endpoint, err)
+ }
+ cancel = func() { conn.Close() }
+ return RetryMaintenanceClient(c, conn), cancel, nil
+ },
+ remote: RetryMaintenanceClient(c, c.conn),
+ }
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
+ api := &maintenance{
+ lg: c.lg,
+ dial: func(string) (pb.MaintenanceClient, func(), error) {
+ return remote, func() {}, nil
+ },
+ remote: remote,
+ }
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+// AlarmList lists all active alarms in the cluster.
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+ req := &pb.AlarmRequest{
+ Action: pb.AlarmRequest_GET,
+ MemberID: 0, // all
+ Alarm: pb.AlarmType_NONE, // all
+ }
+ resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+ if err == nil {
+ return (*AlarmResponse)(resp), nil
+ }
+ return nil, toErr(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+ req := &pb.AlarmRequest{
+ Action: pb.AlarmRequest_DEACTIVATE,
+ MemberID: am.MemberID,
+ Alarm: am.Alarm,
+ }
+
+ if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+ ar, err := m.AlarmList(ctx)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ ret := AlarmResponse{}
+ for _, am := range ar.Alarms {
+ dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+ if derr != nil {
+ return nil, toErr(ctx, derr)
+ }
+ ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+ }
+ return &ret, nil
+ }
+
+ resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+ if err == nil {
+ return (*AlarmResponse)(resp), nil
+ }
+ return nil, toErr(ctx, err)
+}
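+
+// Note (illustrative): since a zero MemberID together with AlarmType_NONE takes the
+// list-and-disarm branch above, passing an empty member disarms every active alarm:
+//
+//	_, err := m.AlarmDisarm(ctx, &AlarmMember{})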
+
+// Defragment defragments the backend database of the given endpoint.
+func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
+ remote, cancel, err := m.dial(endpoint)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ defer cancel()
+ resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*DefragmentResponse)(resp), nil
+}
+
+func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+ remote, cancel, err := m.dial(endpoint)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ defer cancel()
+ resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*StatusResponse)(resp), nil
+}
+
+func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+ remote, cancel, err := m.dial(endpoint)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ defer cancel()
+ resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+ return (*HashKVResponse)(resp), nil
+}
+
+func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+ ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+ if err != nil {
+ return nil, toErr(ctx, err)
+ }
+
+ m.lg.Info("打开快照流;下载ing")
+ pr, pw := io.Pipe()
+ go func() {
+ for {
+ resp, err := ss.Recv()
+ if err != nil {
+ switch err {
+ case io.EOF:
+ m.lg.Info("快照读取完成;关闭ing")
+ default:
+ m.lg.Warn("从快照流接收失败;关闭ing", zap.Error(err))
+ }
+ pw.CloseWithError(err)
+ return
+ }
+ if _, werr := pw.Write(resp.Blob); werr != nil {
+ pw.CloseWithError(werr)
+ return
+ }
+ }
+ }()
+ return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
+}
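+
+// Usage sketch (illustrative; the file name is an assumption): stream the
+// snapshot into a local file.
+//
+//	rc, err := m.Snapshot(ctx)
+//	if err != nil { /* handle error */ }
+//	defer rc.Close()
+//	f, err := os.Create("snapshot.db")
+//	if err != nil { /* handle error */ }
+//	defer f.Close()
+//	_, err = io.Copy(f, rc)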
+
+type snapshotReadCloser struct {
+ ctx context.Context
+ io.ReadCloser
+}
+
+func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
+ n, err = rc.ReadCloser.Read(p)
+ return n, toErr(rc.ctx, err)
+}
+
+func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+ resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
+ return (*MoveLeaderResponse)(resp), toErr(ctx, err)
+}
diff --git a/client_sdk/v3/mirror/syncer.go b/client_sdk/v3/mirror/syncer.go
new file mode 100644
index 00000000000..73981011967
--- /dev/null
+++ b/client_sdk/v3/mirror/syncer.go
@@ -0,0 +1,102 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package mirror implements etcd mirroring operations.
+package mirror
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+)
+
+const (
+ batchLimit = 1000
+)
+
+type Syncer interface {
+ SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) // syncs the base key-value state and sends it over the returned channels
+ SyncUpdates(ctx context.Context) clientv3.WatchChan // syncs incremental updates once the base state has been synced
+}
+
+// NewSyncer creates a Syncer that mirrors keys under prefix starting at revision rev.
+func NewSyncer(c *clientv3.Client, prefix string, rev int64) Syncer {
+ return &syncer{c: c, prefix: prefix, rev: rev}
+}
+
+type syncer struct {
+ c *clientv3.Client
+ rev int64
+ prefix string
+}
+
+func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) {
+ respchan := make(chan clientv3.GetResponse, 1024)
+ errchan := make(chan error, 1)
+
+ // If no revision was specified, use the most recent revision.
+ if s.rev == 0 {
+ resp, err := s.c.Get(ctx, "foo")
+ if err != nil {
+ errchan <- err
+ close(respchan)
+ close(errchan)
+ return respchan, errchan
+ }
+ s.rev = resp.Header.Revision
+ }
+
+ go func() {
+ defer close(respchan)
+ defer close(errchan)
+
+ var key string
+
+ opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)}
+
+ if len(s.prefix) == 0 {
+ // sync all key-value pairs
+ opts = append(opts, clientv3.WithFromKey())
+ key = "\x00"
+ } else {
+ opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(s.prefix)))
+ key = s.prefix
+ }
+
+ for {
+ resp, err := s.c.Get(ctx, key, opts...)
+ if err != nil {
+ errchan <- err
+ return
+ }
+
+ respchan <- *resp
+
+ if !resp.More {
+ return
+ }
+ // move to next key
+ key = string(append([]byte(resp.Kvs[len(resp.Kvs)-1].Key), 0))
+ }
+ }()
+
+ return respchan, errchan
+}
+
+func (s *syncer) SyncUpdates(ctx context.Context) clientv3.WatchChan {
+ if s.rev == 0 {
+ panic("unexpected revision = 0. Calling SyncUpdates before SyncBase finishes?")
+ }
+ return s.c.Watch(ctx, s.prefix, clientv3.WithPrefix(), clientv3.WithRev(s.rev+1))
+}
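+
+// Illustrative usage (a sketch, not part of the original file): a mirroring loop
+// first drains SyncBase and only then follows SyncUpdates. Assuming a connected
+// *clientv3.Client `cli`:
+//
+//	s := NewSyncer(cli, "my-prefix/", 0)
+//	rc, errc := s.SyncBase(context.TODO())
+//	for resp := range rc {
+//		for _, kv := range resp.Kvs {
+//			fmt.Println("base:", string(kv.Key))
+//		}
+//	}
+//	if err := <-errc; err != nil {
+//		// handle error
+//	}
+//	for wresp := range s.SyncUpdates(context.TODO()) {
+//		for _, ev := range wresp.Events {
+//			fmt.Println("update:", ev.Type, string(ev.Kv.Key))
+//		}
+//	}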
diff --git a/client_sdk/v3/mock/mockserver/doc.go b/client_sdk/v3/mock/mockserver/doc.go
new file mode 100644
index 00000000000..00c044c3aab
--- /dev/null
+++ b/client_sdk/v3/mock/mockserver/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package mockserver provides mock implementations for etcdserver's etcd interface.
+package mockserver
diff --git a/client/v3/mock/mockserver/mockserver.go b/client_sdk/v3/mock/mockserver/mockserver.go
similarity index 78%
rename from client/v3/mock/mockserver/mockserver.go
rename to client_sdk/v3/mock/mockserver/mockserver.go
index 837d45db175..d219f188764 100644
--- a/client/v3/mock/mockserver/mockserver.go
+++ b/client_sdk/v3/mock/mockserver/mockserver.go
@@ -17,17 +17,18 @@ package mockserver
import (
"context"
"fmt"
+ "io/ioutil"
"net"
"os"
"sync"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"google.golang.org/grpc"
"google.golang.org/grpc/resolver"
)
-// MockServer provides a mocked out grpc server of the etcdserver interface.
+// MockServer provides a mocked out gRPC server of the etcdserver interface.
type MockServer struct {
ln net.Listener
Network string
@@ -83,7 +84,7 @@ func startMockServersUnix(count int) (ms *MockServers, err error) {
dir := os.TempDir()
addrs := make([]string, 0, count)
for i := 0; i < count; i++ {
- f, err := os.CreateTemp(dir, "etcd-unix-so-")
+ f, err := ioutil.TempFile(dir, "etcd-unix-so-")
if err != nil {
return nil, fmt.Errorf("failed to allocate temp file for unix socket: %v", err)
}
@@ -118,7 +119,7 @@ func startMockServers(network string, addrs []string) (ms *MockServers, err erro
return ms, nil
}
-// StartAt restarts mock server at given index.
+// StartAt restarts mock etcd at given index.
func (ms *MockServers) StartAt(idx int) (err error) {
ms.mu.Lock()
defer ms.mu.Unlock()
@@ -132,7 +133,6 @@ func (ms *MockServers) StartAt(idx int) (err error) {
svr := grpc.NewServer()
pb.RegisterKVServer(svr, &mockKVServer{})
- pb.RegisterLeaseServer(svr, &mockLeaseServer{})
ms.Servers[idx].GrpcServer = svr
ms.wg.Add(1)
@@ -142,7 +142,7 @@ func (ms *MockServers) StartAt(idx int) (err error) {
return nil
}
-// StopAt stops mock server at given index.
+// StopAt stops mock etcd at given index.
func (ms *MockServers) StopAt(idx int) {
ms.mu.Lock()
defer ms.mu.Unlock()
@@ -157,7 +157,7 @@ func (ms *MockServers) StopAt(idx int) {
ms.wg.Done()
}
-// Stop stops the mock server, immediately closing all open connections and listeners.
+// Stop stops the mock etcd, immediately closing all open connections and listeners.
func (ms *MockServers) Stop() {
for idx := range ms.Servers {
ms.StopAt(idx)
@@ -186,29 +186,3 @@ func (m *mockKVServer) Txn(context.Context, *pb.TxnRequest) (*pb.TxnResponse, er
func (m *mockKVServer) Compact(context.Context, *pb.CompactionRequest) (*pb.CompactionResponse, error) {
return &pb.CompactionResponse{}, nil
}
-
-func (m *mockKVServer) Lease(context.Context, *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return &pb.LeaseGrantResponse{}, nil
-}
-
-type mockLeaseServer struct{}
-
-func (s mockLeaseServer) LeaseGrant(context.Context, *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return &pb.LeaseGrantResponse{}, nil
-}
-
-func (s *mockLeaseServer) LeaseRevoke(context.Context, *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- return &pb.LeaseRevokeResponse{}, nil
-}
-
-func (s *mockLeaseServer) LeaseKeepAlive(pb.Lease_LeaseKeepAliveServer) error {
- return nil
-}
-
-func (s *mockLeaseServer) LeaseTimeToLive(context.Context, *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
- return &pb.LeaseTimeToLiveResponse{}, nil
-}
-
-func (s *mockLeaseServer) LeaseLeases(context.Context, *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
- return &pb.LeaseLeasesResponse{}, nil
-}
diff --git a/client_sdk/v3/namespace/doc.go b/client_sdk/v3/namespace/doc.go
new file mode 100644
index 00000000000..01849b150ab
--- /dev/null
+++ b/client_sdk/v3/namespace/doc.go
@@ -0,0 +1,43 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package namespace is a clientv3 wrapper that translates all keys to begin
+// with a given prefix.
+//
+// First, create a client:
+//
+// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
+// if err != nil {
+// // handle error!
+// }
+//
+// Next, override the client interfaces:
+//
+// unprefixedKV := cli.KV
+// cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
+// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
+// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
+//
+// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/":
+//
+// cli.Put(context.TODO(), "abc", "123")
+// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc")
+// fmt.Printf("%s\n", resp.Kvs[0].Value)
+// // Output: 123
+// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456")
+// resp, _ = cli.Get(context.TODO(), "abc")
+// fmt.Printf("%s\n", resp.Kvs[0].Value)
+// // Output: 456
+//
+package namespace
diff --git a/client_sdk/v3/namespace/kv.go b/client_sdk/v3/namespace/kv.go
new file mode 100644
index 00000000000..483970fb58d
--- /dev/null
+++ b/client_sdk/v3/namespace/kv.go
@@ -0,0 +1,213 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namespace
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+)
+
+type kvPrefix struct {
+ clientv3.KV
+ pfx string
+}
+
+// NewKV wraps a KV instance so that all requests
+// are prefixed with a given string.
+func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
+ return &kvPrefix{kv, prefix}
+}
+
+func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
+ if len(key) == 0 {
+ return nil, rpctypes.ErrEmptyKey
+ }
+ op := kv.prefixOp(clientv3.OpPut(key, val, opts...))
+ r, err := kv.KV.Do(ctx, op)
+ if err != nil {
+ return nil, err
+ }
+ put := r.Put()
+ kv.unprefixPutResponse(put)
+ return put, nil
+}
+
+func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+ if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) {
+ return nil, rpctypes.ErrEmptyKey
+ }
+ r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...)))
+ if err != nil {
+ return nil, err
+ }
+ get := r.Get()
+ kv.unprefixGetResponse(get)
+ return get, nil
+}
+
+func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
+ if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) {
+ return nil, rpctypes.ErrEmptyKey
+ }
+ r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...)))
+ if err != nil {
+ return nil, err
+ }
+ del := r.Del()
+ kv.unprefixDeleteResponse(del)
+ return del, nil
+}
+
+func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
+ if len(op.KeyBytes()) == 0 && !op.IsTxn() {
+ return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
+ }
+ r, err := kv.KV.Do(ctx, kv.prefixOp(op))
+ if err != nil {
+ return r, err
+ }
+ switch {
+ case r.Get() != nil:
+ kv.unprefixGetResponse(r.Get())
+ case r.Put() != nil:
+ kv.unprefixPutResponse(r.Put())
+ case r.Del() != nil:
+ kv.unprefixDeleteResponse(r.Del())
+ case r.Txn() != nil:
+ kv.unprefixTxnResponse(r.Txn())
+ }
+ return r, nil
+}
+
+type txnPrefix struct {
+ clientv3.Txn
+ kv *kvPrefix
+}
+
+func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
+ return &txnPrefix{kv.KV.Txn(ctx), kv}
+}
+
+func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
+ txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...)
+ return txn
+}
+
+func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
+ txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...)
+ return txn
+}
+
+func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
+ txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...)
+ return txn
+}
+
+func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
+ resp, err := txn.Txn.Commit()
+ if err != nil {
+ return nil, err
+ }
+ txn.kv.unprefixTxnResponse(resp)
+ return resp, nil
+}
+
+func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
+ if !op.IsTxn() {
+ begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
+ op.WithKeyBytes(begin)
+ op.WithRangeBytes(end)
+ return op
+ }
+ cmps, thenOps, elseOps := op.Txn()
+ return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps))
+}
+
+func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
+ for i := range resp.Kvs {
+ resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):]
+ }
+}
+
+func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) {
+ if resp.PrevKv != nil {
+ resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):]
+ }
+}
+
+func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) {
+ for i := range resp.PrevKvs {
+ resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):]
+ }
+}
+
+func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
+ for _, r := range resp.Responses {
+ if r.ResponseOp_ResponseRange != nil {
+ tv := r.ResponseOp_ResponseRange
+ if tv.ResponseRange != nil {
+ kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange))
+ }
+ }
+ if r.ResponseOp_ResponsePut != nil {
+ tv := r.ResponseOp_ResponsePut
+ if tv.ResponsePut != nil {
+ kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut))
+ }
+ }
+
+ if r.ResponseOp_ResponseDeleteRange != nil {
+ tv := r.ResponseOp_ResponseDeleteRange
+ if tv.ResponseDeleteRange != nil {
+ kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
+ }
+ }
+ if r.ResponseOp_ResponseTxn != nil {
+ tv := r.ResponseOp_ResponseTxn
+ if tv.ResponseTxn != nil {
+ kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn))
+ }
+ }
+
+ }
+}
+
+func (kv *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
+ return prefixInterval(kv.pfx, key, end)
+}
+
+func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp {
+ newCmps := make([]clientv3.Cmp, len(cs))
+ for i := range cs {
+ newCmps[i] = cs[i]
+ pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), []byte(cs[i].RangeEnd))
+ newCmps[i].WithKeyBytes(pfxKey)
+ if len(cs[i].RangeEnd) != 0 {
+ newCmps[i].RangeEnd = string(endKey)
+ }
+ }
+ return newCmps
+}
+
+func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op {
+ newOps := make([]clientv3.Op, len(ops))
+ for i := range ops {
+ newOps[i] = kv.prefixOp(ops[i])
+ }
+ return newOps
+}
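+
+// Illustrative usage (a sketch, not part of the original file): with prefix "ns/"
+// the wrapper rewrites every request before it reaches the underlying KV and strips
+// the prefix from responses via the unprefix* helpers above:
+//
+//	kv := NewKV(cli.KV, "ns/")
+//	kv.Put(context.TODO(), "abc", "1")                   // stored under "ns/abc"
+//	kv.Get(context.TODO(), "abc", clientv3.WithPrefix()) // ranges over ["ns/abc", "ns/abd")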
diff --git a/client_sdk/v3/namespace/lease.go b/client_sdk/v3/namespace/lease.go
new file mode 100644
index 00000000000..8a9b4176a2b
--- /dev/null
+++ b/client_sdk/v3/namespace/lease.go
@@ -0,0 +1,57 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namespace
+
+import (
+ "bytes"
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+)
+
+type leasePrefix struct {
+ clientv3.Lease
+ pfx []byte
+}
+
+// NewLease wraps a Lease interface to filter for only keys with a prefix
+// and remove that prefix when fetching attached keys through TimeToLive.
+func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
+ return &leasePrefix{l, []byte(prefix)}
+}
+
+func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
+ resp, err := l.Lease.TimeToLive(ctx, id, opts...)
+ if err != nil {
+ return nil, err
+ }
+ if len(resp.Keys) > 0 {
+ var outKeys [][]byte
+ for i := range resp.Keys {
+ if len(resp.Keys[i]) < len(l.pfx) {
+ // too short
+ continue
+ }
+ if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
+ // doesn't match prefix
+ continue
+ }
+ // strip prefix
+ outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
+ }
+ resp.Keys = outKeys
+ }
+ return resp, nil
+}
diff --git a/client/v3/namespace/util.go b/client_sdk/v3/namespace/util.go
similarity index 100%
rename from client/v3/namespace/util.go
rename to client_sdk/v3/namespace/util.go
diff --git a/client_sdk/v3/namespace/watch.go b/client_sdk/v3/namespace/watch.go
new file mode 100644
index 00000000000..7836aba7bd2
--- /dev/null
+++ b/client_sdk/v3/namespace/watch.go
@@ -0,0 +1,84 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package namespace
+
+import (
+ "context"
+ "sync"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+)
+
+type watcherPrefix struct {
+ clientv3.Watcher
+ pfx string
+
+ wg sync.WaitGroup
+ stopc chan struct{}
+ stopOnce sync.Once
+}
+
+// NewWatcher wraps a Watcher instance so that all Watch requests
+// are prefixed with a given string and all Watch responses have
+// the prefix removed.
+func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
+ return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
+}
+
+// Watch prefixes the watched key range and strips the prefix from delivered events.
+func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
+ // since OpOption is opaque, determine range for prefixing through an OpGet
+ op := clientv3.OpGet(key, opts...)
+ end := op.RangeBytes()
+ pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end)
+ if pfxEnd != nil {
+ opts = append(opts, clientv3.WithRange(string(pfxEnd)))
+ }
+
+ wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...)
+
+ // translate watch events from the prefixed key space back to unprefixed keys
+ pfxWch := make(chan clientv3.WatchResponse)
+ w.wg.Add(1)
+ go func() {
+ defer func() {
+ close(pfxWch)
+ w.wg.Done()
+ }()
+ for wr := range wch {
+ for i := range wr.Events {
+ wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):]
+ if wr.Events[i].PrevKv != nil {
+ wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key
+ }
+ }
+ select {
+ case pfxWch <- wr:
+ case <-ctx.Done():
+ return
+ case <-w.stopc:
+ return
+ }
+ }
+ }()
+ return pfxWch
+}
+
+func (w *watcherPrefix) Close() error {
+ err := w.Watcher.Close()
+ w.stopOnce.Do(func() { close(w.stopc) })
+ w.wg.Wait()
+ return err
+}
diff --git a/client_sdk/v3/naming/doc.go b/client_sdk/v3/naming/doc.go
new file mode 100644
index 00000000000..f80116f8902
--- /dev/null
+++ b/client_sdk/v3/naming/doc.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package naming provides:
+// - subpackage endpoints: an abstraction layer to store and read endpoints
+// information from etcd.
+// - subpackage resolver: an etcd-backed gRPC resolver for discovering gRPC
+// services based on the endpoints configuration
+//
+// To use, first import the packages:
+//
+// import (
+// "github.com/ls-2018/etcd_cn/client_sdk/v3"
+// "github.com/ls-2018/etcd_cn/client_sdk/v3"
+// "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints"
+// "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/resolver"
+// "google.golang.org/grpc"
+// )
+//
+// First, register new endpoint addresses for a service:
+//
+// func etcdAdd(c *clientv3.Client, service, addr string) error {
+// em := endpoints.NewManager(c, service)
+// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr});
+// }
+//
+// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
+//
+// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
+// etcdResolver, err := resolver.NewBuilder(c);
+// if err != nil { return nil, err }
+// return grpc.Dial("etcd:///" + service, grpc.WithResolvers(etcdResolver))
+// }
+//
+// Optionally, force delete an endpoint:
+//
+// func etcdDelete(c *clientv3.Client, service, addr string) error {
+// em := endpoints.NewManager(c, service)
+// return em.DeleteEndpoint(c.Ctx(), service+"/"+addr)
+// }
+//
+// Or register an expiring endpoint with a lease:
+//
+// func etcdAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
+// em := endpoints.NewManager(c, service)
+// return em.AddEndpoint(c.Ctx(), service+"/"+addr, endpoints.Endpoint{Addr:addr}, clientv3.WithLease(lid));
+// }
+//
+package naming
diff --git a/client/v3/naming/endpoints/endpoints.go b/client_sdk/v3/naming/endpoints/endpoints.go
similarity index 79%
rename from client/v3/naming/endpoints/endpoints.go
rename to client_sdk/v3/naming/endpoints/endpoints.go
index ffe77eff7b6..03b744e2b84 100644
--- a/client/v3/naming/endpoints/endpoints.go
+++ b/client_sdk/v3/naming/endpoints/endpoints.go
@@ -1,23 +1,9 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package endpoints
import (
"context"
- clientv3 "go.etcd.io/etcd/client/v3"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
)
// Endpoint represents a single address the connection can be established with.
@@ -25,7 +11,7 @@ import (
// Inspired by: https://pkg.go.dev/google.golang.org/grpc/resolver#Address.
// Please document etcd version since which version each field is supported.
type Endpoint struct {
- // Addr is the server address on which a connection will be established.
+ // Addr is the etcd address on which a connection will be established.
// Since etcd 3.1
Addr string
diff --git a/client/v3/naming/endpoints/endpoints_impl.go b/client_sdk/v3/naming/endpoints/endpoints_impl.go
similarity index 79%
rename from client/v3/naming/endpoints/endpoints_impl.go
rename to client_sdk/v3/naming/endpoints/endpoints_impl.go
index f88a3eed13f..2460f6d1cfb 100644
--- a/client/v3/naming/endpoints/endpoints_impl.go
+++ b/client_sdk/v3/naming/endpoints/endpoints_impl.go
@@ -1,17 +1,3 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package endpoints
// TODO: The API is not yet implemented.
@@ -22,8 +8,8 @@ import (
"errors"
"strings"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/naming/endpoints/internal"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints/internal"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
@@ -92,8 +78,7 @@ func (m *endpointManager) DeleteEndpoint(ctx context.Context, key string, opts .
}
func (m *endpointManager) NewWatchChannel(ctx context.Context) (WatchChannel, error) {
- key := m.target + "/"
- resp, err := m.client.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithSerializable())
+ resp, err := m.client.Get(ctx, m.target, clientv3.WithPrefix(), clientv3.WithSerializable())
if err != nil {
return nil, err
}
@@ -102,7 +87,7 @@ func (m *endpointManager) NewWatchChannel(ctx context.Context) (WatchChannel, er
initUpdates := make([]*Update, 0, len(resp.Kvs))
for _, kv := range resp.Kvs {
var iup internal.Update
- if err := json.Unmarshal(kv.Value, &iup); err != nil {
+ if err := json.Unmarshal([]byte(kv.Value), &iup); err != nil {
lg.Warn("unmarshal endpoint update failed", zap.String("key", string(kv.Key)), zap.Error(err))
continue
}
@@ -127,8 +112,7 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd
lg := m.client.GetLogger()
opts := []clientv3.OpOption{clientv3.WithRev(rev), clientv3.WithPrefix()}
- key := m.target + "/"
- wch := m.client.Watch(ctx, key, opts...)
+ wch := m.client.Watch(ctx, m.target, opts...)
for {
select {
case <-ctx.Done():
@@ -150,7 +134,7 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd
var op Operation
switch e.Type {
case clientv3.EventTypePut:
- err = json.Unmarshal(e.Kv.Value, &iup)
+ err = json.Unmarshal([]byte(e.Kv.Value), &iup)
op = Add
if err != nil {
lg.Warn("unmarshal endpoint update failed", zap.String("key", string(e.Kv.Key)), zap.Error(err))
@@ -173,8 +157,7 @@ func (m *endpointManager) watch(ctx context.Context, rev int64, upch chan []*Upd
}
func (m *endpointManager) List(ctx context.Context) (Key2EndpointMap, error) {
- key := m.target + "/"
- resp, err := m.client.Get(ctx, key, clientv3.WithPrefix(), clientv3.WithSerializable())
+ resp, err := m.client.Get(ctx, m.target, clientv3.WithPrefix(), clientv3.WithSerializable())
if err != nil {
return nil, err
}
@@ -182,7 +165,7 @@ func (m *endpointManager) List(ctx context.Context) (Key2EndpointMap, error) {
eps := make(Key2EndpointMap)
for _, kv := range resp.Kvs {
var iup internal.Update
- if err := json.Unmarshal(kv.Value, &iup); err != nil {
+ if err := json.Unmarshal([]byte(kv.Value), &iup); err != nil {
continue
}
diff --git a/client_sdk/v3/naming/endpoints/internal/update.go b/client_sdk/v3/naming/endpoints/internal/update.go
new file mode 100644
index 00000000000..08d957f443e
--- /dev/null
+++ b/client_sdk/v3/naming/endpoints/internal/update.go
@@ -0,0 +1,38 @@
+package internal
+
+// Operation describes action performed on endpoint (addition vs deletion).
+// Must stay JSON-format compatible with:
+// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Operation
+type Operation uint8
+
+const (
+ // Add indicates a new address is added.
+ Add Operation = iota
+ // Delete indicates an existing address is deleted.
+ Delete
+)
+
+// Update defines a persistent (JSON marshalled) format representing
+// endpoint within the etcd storage.
+//
+// As the format can be persisted by one version of the etcd client library and
+// read by another, the format must be kept backward compatible and, in
+// particular, must be a superset of the grpc (<=1.29.1) naming.Update structure:
+// https://pkg.go.dev/google.golang.org/grpc@v1.29.1/naming#Update
+//
+// Please document since which version of etcd-client given property is supported.
+// Please keep the naming consistent with e.g. https://pkg.go.dev/google.golang.org/grpc/resolver#Address.
+//
+// Notice that it is not valid having both empty string Addr and nil Metadata in an Update.
+type Update struct {
+ // Op indicates the operation of the update.
+ // Since etcd 3.1.
+ Op Operation
+ // Addr is the updated address. It is empty string if there is no address update.
+ // Since etcd 3.1.
+ Addr string
+ // Metadata is the updated metadata. It is nil if there is no metadata update.
+ // Metadata is not required for a custom naming implementation.
+ // Since etcd 3.1.
+ Metadata interface{}
+}
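+
+// Illustrative example (a sketch, not part of the original file): since no struct
+// tags are declared above, encoding/json uses the field names as-is, so an Add of
+// an endpoint is persisted roughly as:
+//
+//	{"Op":0,"Addr":"10.0.0.1:2379","Metadata":null}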
diff --git a/client_sdk/v3/naming/resolver/resolver.go b/client_sdk/v3/naming/resolver/resolver.go
new file mode 100644
index 00000000000..d208d35133b
--- /dev/null
+++ b/client_sdk/v3/naming/resolver/resolver.go
@@ -0,0 +1,107 @@
+package resolver
+
+import (
+ "context"
+ "sync"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints"
+
+ "google.golang.org/grpc/codes"
+ gresolver "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
+)
+
+type builder struct {
+ c *clientv3.Client
+}
+
+func (b builder) Build(target gresolver.Target, cc gresolver.ClientConn, opts gresolver.BuildOptions) (gresolver.Resolver, error) {
+ r := &resolver{
+ c: b.c,
+ target: target.Endpoint,
+ cc: cc,
+ }
+ r.ctx, r.cancel = context.WithCancel(context.Background())
+
+ em, err := endpoints.NewManager(r.c, r.target)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "resolver: failed to new endpoint manager: %s", err)
+ }
+ r.wch, err = em.NewWatchChannel(r.ctx)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "resolver: failed to new watch channel: %s", err)
+ }
+
+ r.wg.Add(1)
+ go r.watch()
+ return r, nil
+}
+
+func (b builder) Scheme() string {
+ return "etcd"
+}
+
+// NewBuilder creates a resolver builder.
+func NewBuilder(client *clientv3.Client) (gresolver.Builder, error) {
+ return builder{c: client}, nil
+}
+
+type resolver struct {
+ c *clientv3.Client
+ target string
+ cc gresolver.ClientConn
+ wch endpoints.WatchChannel
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+}
+
+func (r *resolver) watch() {
+ defer r.wg.Done()
+
+ allUps := make(map[string]*endpoints.Update)
+ for {
+ select {
+ case <-r.ctx.Done():
+ return
+ case ups, ok := <-r.wch:
+ if !ok {
+ return
+ }
+
+ for _, up := range ups {
+ switch up.Op {
+ case endpoints.Add:
+ allUps[up.Key] = up
+ case endpoints.Delete:
+ delete(allUps, up.Key)
+ }
+ }
+
+ addrs := convertToGRPCAddress(allUps)
+ r.cc.UpdateState(gresolver.State{Addresses: addrs})
+ }
+ }
+}
+
+func convertToGRPCAddress(ups map[string]*endpoints.Update) []gresolver.Address {
+ var addrs []gresolver.Address
+ for _, up := range ups {
+ addr := gresolver.Address{
+ Addr: up.Endpoint.Addr,
+ Metadata: up.Endpoint.Metadata,
+ }
+ addrs = append(addrs, addr)
+ }
+ return addrs
+}
+
+// ResolveNow is a no-op here.
+// It's just a hint, resolver can ignore this if it's not necessary.
+func (r *resolver) ResolveNow(gresolver.ResolveNowOptions) {}
+
+func (r *resolver) Close() {
+ r.cancel()
+ r.wg.Wait()
+}
diff --git a/client/v3/op.go b/client_sdk/v3/op.go
similarity index 80%
rename from client/v3/op.go
rename to client_sdk/v3/op.go
index 6193d090a91..d11a40f3ebd 100644
--- a/client/v3/op.go
+++ b/client_sdk/v3/op.go
@@ -14,7 +14,11 @@
package clientv3
-import pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+import (
+ "fmt"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
type opType int
@@ -28,12 +32,10 @@ const (
var noPrefixEnd = []byte{0}
-// Op represents an Operation that kv can execute.
type Op struct {
t opType
- key []byte
- end []byte
-
+ key string
+ end string
// for range
limit int64
sort *SortOption
@@ -57,29 +59,22 @@ type Op struct {
// "--max-request-bytes" flag value + 512-byte
fragment bool
- // for put
ignoreValue bool
ignoreLease bool
- // progressNotify is for progress updates.
- progressNotify bool
- // createdNotify is for created event
- createdNotify bool
- // filters for watchers
- filterPut bool
- filterDelete bool
+ progressNotify bool // progress updates
+ createdNotify bool // created event
+ filterPut bool // filter out put events
+ filterDelete bool // filter out delete events
// for put
- val []byte
+ val string
leaseID LeaseID
// txn
cmps []Cmp
thenOps []Op
elseOps []Op
-
- isOptsWithFromKey bool
- isOptsWithPrefix bool
}
// accessors / mutators
@@ -95,13 +90,13 @@ func (op Op) Txn() ([]Cmp, []Op, []Op) {
}
// KeyBytes returns the byte slice holding the Op's key.
-func (op Op) KeyBytes() []byte { return op.key }
+func (op Op) KeyBytes() []byte { return []byte(op.key) }
// WithKeyBytes sets the byte slice for the Op's key.
-func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+func (op *Op) WithKeyBytes(key []byte) { op.key = string(key) }
// RangeBytes returns the byte slice holding with the Op's range end, if any.
-func (op Op) RangeBytes() []byte { return op.end }
+func (op Op) RangeBytes() []byte { return []byte(op.end) }
// Rev returns the requested revision, if any.
func (op Op) Rev() int64 { return op.rev }
@@ -137,13 +132,15 @@ func (op Op) MinCreateRev() int64 { return op.minCreateRev }
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
// WithRangeBytes sets the byte slice for the Op's range end.
-func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+func (op *Op) WithRangeBytes(end []byte) {
+ op.end = string(end)
+}
// ValueBytes returns the byte slice holding the Op's value, if any.
-func (op Op) ValueBytes() []byte { return op.val }
+func (op Op) ValueBytes() []byte { return []byte(op.val) }
// WithValueBytes sets the byte slice for the Op's value.
-func (op *Op) WithValueBytes(v []byte) { op.val = v }
+func (op *Op) WithValueBytes(v []byte) { op.val = string(v) }
func (op Op) toRangeRequest() *pb.RangeRequest {
if op.t != tRange {
@@ -188,15 +185,16 @@ func (op Op) toTxnRequest() *pb.TxnRequest {
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
+ return &pb.RequestOp{RequestOp_RequestRange: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
+ return &pb.RequestOp{RequestOp_RequestPut: &pb.RequestOp_RequestPut{RequestPut: r}}
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
+ fmt.Println("----->", r)
+ return &pb.RequestOp{RequestOp_RequestDeleteRange: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
case tTxn:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
+ return &pb.RequestOp{RequestOp_RequestTxn: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
default:
panic("Unknown Op")
}
@@ -219,17 +217,13 @@ func (op Op) isWrite() bool {
return op.t != tRange
}
-func NewOp() *Op {
- return &Op{key: []byte("")}
-}
-
// OpGet returns "get" operation based on given key and operation options.
func OpGet(key string, opts ...OpOption) Op {
// WithPrefix and WithFromKey are not supported together
if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) {
panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
}
- ret := Op{t: tRange, key: []byte(key)}
+ ret := Op{t: tRange, key: key}
ret.applyOpts(opts)
return ret
}
@@ -240,7 +234,7 @@ func OpDelete(key string, opts ...OpOption) Op {
if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) {
panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
}
- ret := Op{t: tDeleteRange, key: []byte(key)}
+ ret := Op{t: tDeleteRange, key: key}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
@@ -269,10 +263,10 @@ func OpDelete(key string, opts ...OpOption) Op {
// OpPut returns "put" operation based on given key-value and operation options.
func OpPut(key, val string, opts ...OpOption) Op {
- ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
+ ret := Op{t: tPut, key: key, val: val}
ret.applyOpts(opts)
switch {
- case ret.end != nil:
+ case ret.end != "":
panic("unexpected range in put")
case ret.limit != 0:
panic("unexpected limit in put")
@@ -301,24 +295,25 @@ func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
+// opWatch builds a watch operation and validates its options.
func opWatch(key string, opts ...OpOption) Op {
- ret := Op{t: tRange, key: []byte(key)}
+ ret := Op{t: tRange, key: key}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
- panic("unexpected lease in watch")
+ panic("unexpected watch中不能有租约")
case ret.limit != 0:
- panic("unexpected limit in watch")
+ panic("unexpected watch中不能有limit")
case ret.sort != nil:
- panic("unexpected sort in watch")
+ panic("unexpected watch中不能有sort")
case ret.serializable:
- panic("unexpected serializable in watch")
+ panic("unexpected watch中不能有 serializable")
case ret.countOnly:
- panic("unexpected countOnly in watch")
+ panic("unexpected watch中不能有countOnly")
case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in watch")
+ panic("unexpected watch中不能过滤修订版本")
case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in watch")
+ panic("unexpected watch中不能过滤创建时的修订版本")
}
return ret
}
@@ -337,8 +332,7 @@ func WithLease(leaseID LeaseID) OpOption {
return func(op *Op) { op.leaseID = leaseID }
}
-// WithLimit limits the number of results to return from 'Get' request.
-// If WithLimit is given a 0 limit, it is treated as no limit.
+// WithLimit limits the number of results returned by a 'Get' request. A limit of 0 is treated as no limit.
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
// WithRev specifies the store revision for 'Get' request.
@@ -352,9 +346,9 @@ func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
func WithSort(target SortTarget, order SortOrder) OpOption {
return func(op *Op) {
if target == SortByKey && order == SortAscend {
- // If order != SortNone, server fetches the entire key-space,
+ // If order != SortNone, etcd fetches the entire key-space,
// and then applies the sort and limit, if provided.
- // Since by default the server returns results sorted by keys
+ // Since by default etcd returns results sorted by keys
// in lexicographically ascending order, the client should ignore
// SortOrder if the target is SortByKey.
order = SortNone
@@ -363,25 +357,25 @@ func WithSort(target SortTarget, order SortOrder) OpOption {
}
}
-// GetPrefixRangeEnd gets the range end of the prefix.
-// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'.
+// GetPrefixRangeEnd returns the range end of the given prefix.
+// For example, prefix "1" covers the range [1, 2).
func GetPrefixRangeEnd(prefix string) string {
- return string(getPrefix([]byte(prefix)))
+ return getPrefix(prefix)
}
-func getPrefix(key []byte) []byte {
+func getPrefix(key string) string {
end := make([]byte, len(key))
copy(end, key)
for i := len(end) - 1; i >= 0; i-- {
if end[i] < 0xff {
end[i] = end[i] + 1
end = end[:i+1]
- return end
+ return string(end)
}
}
// next prefix does not exist (e.g., 0xffff);
// default to WithFromKey policy
- return noPrefixEnd
+ return string(noPrefixEnd)
}
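+// Illustrative examples (a sketch, not part of the original file): getPrefix("foo")
+// returns "fop" (the last byte below 0xff is incremented and the key truncated after it),
+// so a prefix request on "foo" covers ["foo", "fop"); a prefix made only of 0xff bytes
+// has no next prefix and falls back to noPrefixEnd ("\x00", the WithFromKey policy).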
// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
@@ -389,9 +383,8 @@ func getPrefix(key []byte) []byte {
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
return func(op *Op) {
- op.isOptsWithPrefix = true
if len(op.key) == 0 {
- op.key, op.end = []byte{0}, []byte{0}
+ op.key, op.end = string([]byte{0}), string([]byte{0})
return
}
op.end = getPrefix(op.key)
@@ -401,9 +394,9 @@ func WithPrefix() OpOption {
// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
// For example, 'Get' requests with 'WithRange(end)' returns
// the keys in the range [key, end).
-// endKey must be lexicographically greater than start key.
+// endKey must be lexicographically greater than the start key.
func WithRange(endKey string) OpOption {
- return func(op *Op) { op.end = []byte(endKey) }
+ return func(op *Op) { op.end = endKey }
}
// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
@@ -411,10 +404,9 @@ func WithRange(endKey string) OpOption {
func WithFromKey() OpOption {
return func(op *Op) {
if len(op.key) == 0 {
- op.key = []byte{0}
+ op.key = string([]byte{0})
}
- op.end = []byte("\x00")
- op.isOptsWithFromKey = true
+ op.end = string([]byte("\x00"))
}
}
@@ -448,13 +440,10 @@ func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRe
// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
-// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
-// WithLastCreate gets the key with the latest creation revision in the request range.
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
-// WithFirstKey gets the lexically first key in the request range.
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
// WithLastKey gets the lexically last key in the request range.
@@ -471,7 +460,7 @@ func withTop(target SortTarget, order SortOrder) []OpOption {
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}
-// WithProgressNotify makes watch server send periodic progress updates
+// WithProgressNotify makes the etcd watch server send periodic progress updates
// every 10 minutes when there is no incoming events.
// Progress updates have zero events in WatchResponse.
func WithProgressNotify() OpOption {
@@ -480,7 +469,7 @@ func WithProgressNotify() OpOption {
}
}
-// WithCreatedNotify makes watch server sends the created event.
+// WithCreatedNotify makes the etcd watch server send the created event.
func WithCreatedNotify() OpOption {
return func(op *Op) {
op.createdNotify = true
@@ -507,11 +496,11 @@ func WithPrevKV() OpOption {
// WithFragment to receive raw watch response with fragmentation.
// Fragmentation is disabled by default. If fragmentation is enabled,
-// etcd watch server will split watch response before sending to clients
-// when the total size of watch events exceed server-side request limit.
-// The default server-side request limit is 1.5 MiB, which can be configured
+// the etcd watch server will split a watch response before sending it to clients
+// when the total size of watch events exceeds the etcd-side request limit.
+// The default etcd-side request limit is 1.5 MiB, which can be configured
// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
-// See "etcdserver/api/v3rpc/watch.go" for more details.
+// See "etcdserver/api/v3rpc/over_watch.go" for more details.
func WithFragment() OpOption {
return func(op *Op) { op.fragment = true }
}
@@ -563,37 +552,7 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi
}
// IsOptsWithPrefix returns true if WithPrefix option is called in the given opts.
-func IsOptsWithPrefix(opts []OpOption) bool {
- ret := NewOp()
- for _, opt := range opts {
- opt(ret)
- }
-
- return ret.isOptsWithPrefix
-}
+func IsOptsWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) }
// IsOptsWithFromKey returns true if WithFromKey option is called in the given opts.
-func IsOptsWithFromKey(opts []OpOption) bool {
- ret := NewOp()
- for _, opt := range opts {
- opt(ret)
- }
-
- return ret.isOptsWithFromKey
-}
-
-func (op Op) IsSortOptionValid() bool {
- if op.sort != nil {
- sortOrder := int32(op.sort.Order)
- sortTarget := int32(op.sort.Target)
-
- if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok {
- return false
- }
-
- if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok {
- return false
- }
- }
- return true
-}
+func IsOptsWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) }
diff --git a/client_sdk/v3/options.go b/client_sdk/v3/options.go
new file mode 100644
index 00000000000..d5916e9c210
--- /dev/null
+++ b/client_sdk/v3/options.go
@@ -0,0 +1,45 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "math"
+ "time"
+
+ "google.golang.org/grpc"
+)
+
+var (
+ // client-side handling retrying of request failures where data was not written to the wire or
+ // where etcd indicates it did not process the data. The gRPC default is "WaitForReady(false)",
+ // but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to
+ // transient failures.
+ defaultWaitForReady = grpc.WaitForReady(true)
+ defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
+ defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
+ defaultUnaryMaxRetries uint = 100
+ defaultStreamMaxRetries = ^uint(0) // max uint
+ defaultBackoffWaitBetween = 25 * time.Millisecond // wait between retries
+
+ // client-side retry backoff default jitter fraction.
+ defaultBackoffJitterFraction = 0.10
+)
+
+// "clientv3.Config" 默认的 "gRPC.CallOption".
+var defaultCallOpts = []grpc.CallOption{
+ defaultWaitForReady,
+ defaultMaxCallSendMsgSize,
+ defaultMaxCallRecvMsgSize,
+}
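+
+// Illustrative usage (a sketch, not part of the original file): the send/recv sizes
+// above are only defaults; as in upstream clientv3 they can be overridden per client
+// through clientv3.Config (the MaxCallSendMsgSize / MaxCallRecvMsgSize field names are
+// assumed to be carried over from upstream):
+//
+//	cli, err := clientv3.New(clientv3.Config{
+//		Endpoints:          []string{"localhost:2379"},
+//		MaxCallSendMsgSize: 4 * 1024 * 1024,
+//		MaxCallRecvMsgSize: 8 * 1024 * 1024,
+//	})
+//	if err != nil {
+//		// handle error
+//	}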
diff --git a/client_sdk/v3/ordering/doc.go b/client_sdk/v3/ordering/doc.go
new file mode 100644
index 00000000000..856f3305801
--- /dev/null
+++ b/client_sdk/v3/ordering/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ordering is a clientv3 wrapper that caches response header revisions
+// to detect ordering violations from stale responses. Users may define a
+// policy on how to handle the ordering violation, but typically the client
+// should connect to another endpoint and reissue the request.
+//
+// The most common situation where an ordering violation happens is a client
+// reconnects to a partitioned member and issues a serializable read. Since the
+// partitioned member is likely behind the last member, it may return a Get
+// response based on a store revision older than the store revision used to
+// service a prior Get on the former endpoint.
+//
+// First, create a client:
+//
+// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
+// if err != nil {
+// // handle error!
+// }
+//
+// Next, override the client interface with the ordering wrapper:
+//
+// vf := func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
+// return fmt.Errorf("ordering: issued %+v, got %+v, expected rev=%v", op, resp, prevRev)
+// }
+// cli.KV = ordering.NewKV(cli.KV, vf)
+//
+// Now calls using 'cli' will reject order violations with an error.
+//
+package ordering
diff --git a/client_sdk/v3/ordering/kv.go b/client_sdk/v3/ordering/kv.go
new file mode 100644
index 00000000000..139b6c87021
--- /dev/null
+++ b/client_sdk/v3/ordering/kv.go
@@ -0,0 +1,149 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ordering
+
+import (
+ "context"
+ "sync"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+)
+
+// kvOrdering ensures that serialized requests do not return
+// get with revisions less than the previous
+// returned revision.
+type kvOrdering struct {
+ clientv3.KV
+ orderViolationFunc OrderViolationFunc
+ prevRev int64
+ revMu sync.RWMutex
+}
+
+func NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering {
+ return &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}}
+}
+
+func (kv *kvOrdering) getPrevRev() int64 {
+ kv.revMu.RLock()
+ defer kv.revMu.RUnlock()
+ return kv.prevRev
+}
+
+func (kv *kvOrdering) setPrevRev(currRev int64) {
+ kv.revMu.Lock()
+ defer kv.revMu.Unlock()
+ if currRev > kv.prevRev {
+ kv.prevRev = currRev
+ }
+}
+
+func (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
+ // prevRev is stored in a local variable in order to record the prevRev
+ // at the beginning of the Get operation, because concurrent
+ // access to kvOrdering could change the prevRev field in the
+ // middle of the Get operation.
+ prevRev := kv.getPrevRev()
+ op := clientv3.OpGet(key, opts...)
+ for {
+ r, err := kv.KV.Do(ctx, op)
+ if err != nil {
+ return nil, err
+ }
+ resp := r.Get()
+ if resp.Header.Revision == prevRev {
+ return resp, nil
+ } else if resp.Header.Revision > prevRev {
+ kv.setPrevRev(resp.Header.Revision)
+ return resp, nil
+ }
+ err = kv.orderViolationFunc(op, r, prevRev)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+func (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn {
+ return &txnOrdering{
+ kv.KV.Txn(ctx),
+ kv,
+ ctx,
+ sync.Mutex{},
+ []clientv3.Cmp{},
+ []clientv3.Op{},
+ []clientv3.Op{},
+ }
+}
+
+// txnOrdering ensures that serialized requests do not return
+// txn responses with revisions less than the previous
+// returned revision.
+type txnOrdering struct {
+ clientv3.Txn
+ *kvOrdering
+ ctx context.Context
+ mu sync.Mutex
+ cmps []clientv3.Cmp
+ thenOps []clientv3.Op
+ elseOps []clientv3.Op
+}
+
+func (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+ txn.cmps = cs
+ txn.Txn.If(cs...)
+ return txn
+}
+
+func (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+ txn.thenOps = ops
+ txn.Txn.Then(ops...)
+ return txn
+}
+
+func (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+ txn.elseOps = ops
+ txn.Txn.Else(ops...)
+ return txn
+}
+
+func (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) {
+ // prevRev is stored in a local variable in order to record the prevRev
+ // at the beginning of the Commit operation, because concurrent
+ // access to txnOrdering could change the prevRev field in the
+ // middle of the Commit operation.
+ prevRev := txn.getPrevRev()
+ opTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps)
+ for {
+ opResp, err := txn.KV.Do(txn.ctx, opTxn)
+ if err != nil {
+ return nil, err
+ }
+ txnResp := opResp.Txn()
+ if txnResp.Header.Revision >= prevRev {
+ txn.setPrevRev(txnResp.Header.Revision)
+ return txnResp, nil
+ }
+ err = txn.orderViolationFunc(opTxn, opResp, prevRev)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
diff --git a/client_sdk/v3/ordering/util.go b/client_sdk/v3/ordering/util.go
new file mode 100644
index 00000000000..ce7f3deac44
--- /dev/null
+++ b/client_sdk/v3/ordering/util.go
@@ -0,0 +1,42 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ordering
+
+import (
+ "errors"
+ "sync/atomic"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3"
+)
+
+type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error
+
+var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision")
+
+func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFunc {
+ violationCount := int32(0)
+ return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error {
+ // Each request is assigned by the round-robin load balancer's picker to a different
+ // endpoint. If we have cycled through them 5 times (even with some level of concurrency),
+ // with high probability no endpoint points to a member with fresh data.
+ // TODO: Ideally we should track members (resp.opp.Header) that returned
+ // stale result and explicitly temporarily disable them in 'picker'.
+ if atomic.LoadInt32(&violationCount) > int32(5*len(c.Endpoints())) {
+ return ErrNoGreaterRev
+ }
+ atomic.AddInt32(&violationCount, 1)
+ return nil
+ }
+}
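+
+// Illustrative usage (a sketch, not part of the original file): the closure above is
+// meant to be installed together with the ordering KV wrapper, e.g.
+//
+//	cli, _ := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
+//	cli.KV = NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(cli))
+//
+// so stale serializable reads are retried until a member with a fresh enough revision
+// answers, or ErrNoGreaterRev is returned after cycling the endpoints several times.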
diff --git a/client/v3/retry.go b/client_sdk/v3/retry.go
similarity index 98%
rename from client/v3/retry.go
rename to client_sdk/v3/retry.go
index 69ecc631471..6f0616a4952 100644
--- a/client/v3/retry.go
+++ b/client_sdk/v3/retry.go
@@ -17,8 +17,8 @@ package clientv3
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -46,14 +46,14 @@ func (rp retryPolicy) String() string {
// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
//
// immutable requests (e.g. Get) should be retried unless it's
-// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
+// an obvious etcd-side error (e.g. rpctypes.ErrRequestTooLarge).
//
// Returning "false" means retry should stop, since client cannot
// handle itself even with retries.
func isSafeRetryImmutableRPC(err error) bool {
eErr := rpctypes.Error(err)
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
- // interrupted by non-transient server-side or gRPC-side error
+ // interrupted by non-transient etcd-side or gRPC-side error
// client cannot handle itself (e.g. rpctypes.ErrCompacted)
return false
}
@@ -101,6 +101,7 @@ func RetryKVClient(c *Client) pb.KVClient {
kc: pb.NewKVClient(c.conn),
}
}
+
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
diff --git a/client/v3/retry_interceptor.go b/client_sdk/v3/retry_interceptor.go
similarity index 79%
rename from client/v3/retry_interceptor.go
rename to client_sdk/v3/retry_interceptor.go
index 6f15a9c9739..508f77c4626 100644
--- a/client/v3/retry_interceptor.go
+++ b/client_sdk/v3/retry_interceptor.go
@@ -19,24 +19,23 @@ package clientv3
import (
"context"
- "errors"
+ "fmt"
"io"
"sync"
"time"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "github.com/ls-2018/etcd_cn/code_debug/conf"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)
-// unaryClientInterceptor returns a new retrying unary client interceptor.
-//
-// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
-// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
@@ -52,23 +51,23 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
return err
}
- c.GetLogger().Debug(
- "retrying of unary invoker",
- zap.String("target", cc.Target()),
- zap.String("method", method),
- zap.Uint("attempt", attempt),
- )
+			c.GetLogger().Debug("retrying unary invoker", zap.String("target", cc.Target()), zap.Uint("attempt", attempt))
+ if !conf.Perf {
+ switch v := req.(type) {
+ case *pb.TxnRequest:
+ d, _ := v.Marshal()
+ fmt.Println("--->:", string(d)) // key:"a" value:"b"
+ default:
+ fmt.Println("--->:", req) // key:"a" value:"b"
+ }
+ fmt.Println("--->:", method) // /etcdserverpb.KV/Put
+ }
+
lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
if lastErr == nil {
return nil
}
- c.GetLogger().Warn(
- "retrying of unary invoker failed",
- zap.String("target", cc.Target()),
- zap.String("method", method),
- zap.Uint("attempt", attempt),
- zap.Error(lastErr),
- )
+			c.GetLogger().Warn("retrying of unary invoker failed", zap.String("target", cc.Target()), zap.Uint("attempt", attempt), zap.Error(lastErr))
if isContextError(lastErr) {
if ctx.Err() != nil {
// its the context deadline or cancellation.
@@ -78,18 +77,24 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
continue
}
if c.shouldRefreshToken(lastErr, callOpts) {
- gtErr := c.refreshToken(ctx)
- if gtErr != nil {
+			// clear auth token before refreshing it.
+			// Calling c.Auth.Authenticate with an invalid token will always fail the auth check on the etcd side
+			// if etcd has not applied the patch from PR #12165 (https://github.com/etcd-io/etcd/pull/12165),
+			// and rpctypes.ErrInvalidAuthToken will cause c.getToken to be called recursively until the system runs out of resources.
+ c.authTokenBundle.UpdateAuthToken("")
+
+ gterr := c.getToken(ctx)
+ if gterr != nil {
c.GetLogger().Warn(
"retrying of unary invoker failed to fetch new auth token",
zap.String("target", cc.Target()),
- zap.Error(gtErr),
+ zap.Error(gterr),
)
- return gtErr // lastErr must be invalid auth token
+				return gterr // lastErr must be invalid auth token
}
continue
}
- if !isSafeRetry(c, lastErr, callOpts) {
+ if !isSafeRetry(c.lg, lastErr, callOpts) {
return lastErr
}
}
@@ -97,7 +102,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien
}
}
-// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
+// streamClientInterceptor returns a new retrying stream client interceptor for etcd side streaming calls.
//
// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
@@ -109,12 +114,15 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
ctx = withVersion(ctx)
- // getToken automatically. Otherwise, auth token may be invalid after watch reconnection because the token has expired
- // (see https://github.com/etcd-io/etcd/issues/11954 for more).
- err := c.getToken(ctx)
- if err != nil {
- c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err))
- return nil, err
+ // getToken automatically
+ // TODO(cfc4n): keep this code block, remove codes about getToken in client.go after pr #12165 merged.
+ if c.authTokenBundle != nil {
+ // equal to c.Username != "" && c.Password != ""
+ err := c.getToken(ctx)
+ if err != nil && rpctypes.Error(err) != rpctypes.ErrAuthNotEnabled {
+ c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err))
+ return nil, err
+ }
}
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
@@ -147,7 +155,7 @@ func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamCli
// and returns a boolean value.
func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
if rpctypes.Error(err) == rpctypes.ErrUserEmpty {
- // refresh the token when username, password is present but the server returns ErrUserEmpty
+ // refresh the token when username, password is present but the etcd returns ErrUserEmpty
// which is possible when the client token is cleared somehow
return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != ""
}
@@ -156,23 +164,6 @@ func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
(rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision)
}
-func (c *Client) refreshToken(ctx context.Context) error {
- if c.authTokenBundle == nil {
- // c.authTokenBundle will be initialized only when
- // c.Username != "" && c.Password != "".
- //
- // When users use the TLS CommonName based authentication, the
- // authTokenBundle is always nil. But it's possible for the clients
- // to get `rpctypes.ErrAuthOldRevision` response when the clients
- // concurrently modify auth data (e.g, addUser, deleteUser etc.).
- // In this case, there is no need to refresh the token; instead the
- // clients just need to retry the operations (e.g. Put, Delete etc).
- return nil
- }
-
- return c.getToken(ctx)
-}
-
// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
// a new ClientStream according to the retry policy.
@@ -271,15 +262,18 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}
return true, err
}
if s.client.shouldRefreshToken(err, s.callOpts) {
- gtErr := s.client.refreshToken(s.ctx)
- if gtErr != nil {
- s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gtErr))
+ // clear auth token to avoid failure when call getToken
+ s.client.authTokenBundle.UpdateAuthToken("")
+
+ gterr := s.client.getToken(s.ctx)
+ if gterr != nil {
+ s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
return false, err // return the original error for simplicity
}
return true, err
}
- return isSafeRetry(s.client, err, s.callOpts), err
+ return isSafeRetry(s.client.lg, err, s.callOpts), err
}
func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
@@ -319,28 +313,17 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro
}
// isSafeRetry returns "true", if request is safe for retry with the given error.
-func isSafeRetry(c *Client, err error, callOpts *options) bool {
+func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
if isContextError(err) {
return false
}
-
- // Situation when learner refuses RPC it is supposed to not serve is from the server
- // perspective not retryable.
- // But for backward-compatibility reasons we need to support situation that
- // customer provides mix of learners (not yet voters) and voters with an
- // expectation to pick voter in the next attempt.
- // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability.
- if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 {
- return true
- }
-
switch callOpts.retryPolicy {
case repeatable:
return isSafeRetryImmutableRPC(err)
case nonRepeatable:
return isSafeRetryMutableRPC(err)
default:
- c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+ lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
return false
}
}
@@ -360,14 +343,12 @@ func contextErrToGrpcErr(err error) error {
}
}
-var (
- defaultOptions = &options{
- retryPolicy: nonRepeatable,
- max: 0, // disable
- backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
- retryAuth: true,
- }
-)
+var defaultOptions = &options{
+ retryPolicy: nonRepeatable,
+ max: 0, // disable
+ backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+ retryAuth: true,
+}
// backoffFunc denotes a family of functions that control the backoff duration between call retries.
//
@@ -391,7 +372,7 @@ func withMax(maxRetries uint) retryOption {
}}
}
-// WithBackoff sets the `BackoffFunc` used to control time between retries.
+// WithBackoff sets the `BackoffFunc` used to control time between retries.
func withBackoff(bf backoffFunc) retryOption {
return retryOption{applyFunc: func(o *options) {
o.backoffFunc = bf
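
The retry options above (withMax, withBackoff and the defaultOptions block) follow the functional-option pattern: each retryOption carries an applyFunc that mutates a private options struct. A compact, self-contained sketch of that pattern, with names simplified for illustration rather than copied from the client:

```go
package main

import (
	"fmt"
	"time"
)

type options struct {
	max     uint
	backoff func(attempt uint) time.Duration
}

type retryOption struct{ applyFunc func(o *options) }

// withMax bounds the number of retries.
func withMax(maxRetries uint) retryOption {
	return retryOption{applyFunc: func(o *options) { o.max = maxRetries }}
}

// withBackoff sets the function that controls the wait between retries.
func withBackoff(bf func(attempt uint) time.Duration) retryOption {
	return retryOption{applyFunc: func(o *options) { o.backoff = bf }}
}

// apply folds a set of retryOptions over a base configuration.
func apply(base options, opts ...retryOption) options {
	for _, opt := range opts {
		opt.applyFunc(&base)
	}
	return base
}

func main() {
	cfg := apply(options{max: 0}, // retries disabled by default, like defaultOptions above
		withMax(5),
		withBackoff(func(uint) time.Duration { return 50 * time.Millisecond }),
	)
	fmt.Println(cfg.max, cfg.backoff(1)) // 5 50ms
}
```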
diff --git a/client/v3/snapshot/doc.go b/client_sdk/v3/snapshot/doc.go
similarity index 100%
rename from client/v3/snapshot/doc.go
rename to client_sdk/v3/snapshot/doc.go
diff --git a/client_sdk/v3/snapshot/v3_snapshot.go b/client_sdk/v3/snapshot/v3_snapshot.go
new file mode 100644
index 00000000000..533fb511439
--- /dev/null
+++ b/client_sdk/v3/snapshot/v3_snapshot.go
@@ -0,0 +1,98 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snapshot
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/dustin/go-humanize"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "go.uber.org/zap"
+)
+
+// hasChecksum returns "true" if the file size "n"
+// has appended sha256 hash digest.
+func hasChecksum(n int64) bool {
+ // 512 is chosen because it's a minimum disk sector size
+ // smaller than (and multiplies to) OS page size in most systems
+ return (n % 512) == sha256.Size
+}
+
+// Save fetches a snapshot from the remote etcd and saves the data to the target path. If the context "ctx" is
+// canceled or times out, the snapshot save stream will error out (e.g. context.Canceled, context.DeadlineExceeded).
+// Make sure to specify exactly one endpoint in the client configuration. The snapshot API must be requested from the
+// selected node, and the saved snapshot is a point-in-time state of that node.
+func Save(ctx context.Context, lg *zap.Logger, cfg clientv3.Config, dbPath string) error {
+ if lg == nil {
+ lg = zap.NewExample()
+ }
+ cfg.Logger = lg.Named("client")
+ if len(cfg.Endpoints) != 1 {
+ return fmt.Errorf("保存快照时,必须指定一个endpoint %v", cfg.Endpoints)
+ }
+ cli, err := clientv3.New(cfg)
+ if err != nil {
+ return err
+ }
+ defer cli.Close()
+
+ partpath := dbPath + ".part"
+ defer os.RemoveAll(partpath)
+
+ var f *os.File
+ f, err = os.OpenFile(partpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fileutil.PrivateFileMode)
+ if err != nil {
+ return fmt.Errorf("不能打开 %s (%v)", partpath, err)
+ }
+ lg.Info("创建临时快照文件", zap.String("path", partpath))
+
+ now := time.Now()
+ var rd io.ReadCloser
+ rd, err = cli.Snapshot(ctx)
+ if err != nil {
+ return err
+ }
+ lg.Info("获取快照ing", zap.String("endpoint", cfg.Endpoints[0]))
+ var size int64
+ size, err = io.Copy(f, rd)
+ if err != nil {
+ return err
+ }
+ if !hasChecksum(size) {
+ return fmt.Errorf("sha256校验和为发现 [bytes: %d]", size)
+ }
+ if err = fileutil.Fsync(f); err != nil {
+ return err
+ }
+ if err = f.Close(); err != nil {
+ return err
+ }
+ lg.Info("已获取快照数据", zap.String("endpoint", cfg.Endpoints[0]),
+ zap.String("size", humanize.Bytes(uint64(size))),
+ zap.String("took", humanize.Time(now)),
+ )
+
+ if err = os.Rename(partpath, dbPath); err != nil {
+ return fmt.Errorf("重命名失败 %s to %s (%v)", partpath, dbPath, err)
+ }
+ lg.Info("已保存", zap.String("path", dbPath))
+ return nil
+}
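
A short usage sketch of Save as documented above: exactly one endpoint is configured, and the resulting file is a point-in-time copy of that member's backend. The endpoint address and output path are placeholders.

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
	"github.com/ls-2018/etcd_cn/client_sdk/v3/snapshot"
	"go.uber.org/zap"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Save requires exactly one endpoint in the client configuration.
	cfg := clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}}
	if err := snapshot.Save(ctx, zap.NewExample(), cfg, "backup.db"); err != nil {
		log.Fatal(err)
	}
}
```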
diff --git a/client/v3/sort.go b/client_sdk/v3/sort.go
similarity index 95%
rename from client/v3/sort.go
rename to client_sdk/v3/sort.go
index 2bb9d9a13b7..9918ea927fe 100644
--- a/client/v3/sort.go
+++ b/client_sdk/v3/sort.go
@@ -14,8 +14,10 @@
package clientv3
-type SortTarget int
-type SortOrder int
+type (
+ SortTarget int
+ SortOrder int
+)
const (
SortNone SortOrder = iota
diff --git a/client_sdk/v3/txn.go b/client_sdk/v3/txn.go
new file mode 100644
index 00000000000..156b5b91435
--- /dev/null
+++ b/client_sdk/v3/txn.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "sync"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "google.golang.org/grpc"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+// Txn(context.TODO()).If(
+// Compare(Value(k1), ">", v1),
+// Compare(Version(k1), "=", 2)
+// ).Then(
+// OpPut(k2,v2), OpPut(k3,v3)
+// ).Else(
+// OpPut(k4,v4), OpPut(k5,v5)
+// ).Commit()
+//
+type Txn interface {
+ If(cs ...Cmp) Txn
+ Then(ops ...Op) Txn
+ Else(ops ...Op) Txn
+ Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+ kv *kv
+ ctx context.Context
+
+ mu sync.Mutex
+ cif bool
+ cthen bool
+ celse bool
+
+ isWrite bool
+
+ cmps []*pb.Compare
+
+ sus []*pb.RequestOp
+ fas []*pb.RequestOp
+
+ callOpts []grpc.CallOption
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ if txn.cif {
+ panic("cannot call If twice!")
+ }
+
+ if txn.cthen {
+ panic("cannot call If after Then!")
+ }
+
+ if txn.celse {
+ panic("cannot call If after Else!")
+ }
+
+ txn.cif = true
+
+ for i := range cs {
+ txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+ }
+
+ return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ if txn.cthen {
+ panic("cannot call Then twice!")
+ }
+ if txn.celse {
+ panic("cannot call Then after Else!")
+ }
+
+ txn.cthen = true
+
+ for _, op := range ops {
+ txn.isWrite = txn.isWrite || op.isWrite()
+ txn.sus = append(txn.sus, op.toRequestOp())
+ }
+
+ return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ if txn.celse {
+ panic("cannot call Else twice!")
+ }
+
+ txn.celse = true
+
+ for _, op := range ops {
+ txn.isWrite = txn.isWrite || op.isWrite()
+ txn.fas = append(txn.fas, op.toRequestOp())
+ }
+
+ return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+ var resp *pb.TxnResponse
+ var err error
+ resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
+ if err != nil {
+ return nil, toErr(txn.ctx, err)
+ }
+ return (*TxnResponse)(resp), nil
+}
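
A runnable sketch of the If/Then/Else/Commit flow declared above, using the same clientv3 API from this fork. The endpoint, keys, and values are placeholders; resp.Succeeded reports whether the Then branch was taken.

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// If "k1" is still at version 1, write k2; otherwise record the conflict.
	resp, err := cli.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.Version("k1"), "=", 1)).
		Then(clientv3.OpPut("k2", "v2")).
		Else(clientv3.OpPut("conflict", "k1 changed")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("txn took the Then branch:", resp.Succeeded)
}
```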
diff --git a/client_sdk/v3/utils.go b/client_sdk/v3/utils.go
new file mode 100644
index 00000000000..b998c41b90f
--- /dev/null
+++ b/client_sdk/v3/utils.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "math/rand"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+)
+
+// jitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example for 10s and jitter 0.1, it will return a time within [9s, 11s])
+//
+// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+ multiplier := jitter * (rand.Float64()*2 - 1)
+ return time.Duration(float64(duration) * (1 + multiplier))
+}
+
+// Check if the provided function is being called in the op options.
+func isOpFuncCalled(op string, opts []OpOption) bool {
+ for _, opt := range opts {
+ v := reflect.ValueOf(opt)
+ if v.Kind() == reflect.Func {
+ if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
+ if strings.Contains(opFunc.Name(), op) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
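
A quick numerical check of the jitter bounds described in the jitterUp comment above: with a 10s duration and a 0.1 jitter fraction the result always lands in [9s, 11s]. The helper is re-declared here only so the snippet runs standalone.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitterUp mirrors the helper above: add or subtract up to jitter*duration.
func jitterUp(duration time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1) // uniform in [-jitter, +jitter]
	return time.Duration(float64(duration) * (1 + multiplier))
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(jitterUp(10*time.Second, 0.1)) // e.g. 9.4s, 10.7s, ... always within [9s, 11s]
	}
}
```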
diff --git a/client_sdk/v3/watch.go b/client_sdk/v3/watch.go
new file mode 100644
index 00000000000..5daf7a6eb66
--- /dev/null
+++ b/client_sdk/v3/watch.go
@@ -0,0 +1,964 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ v3rpc "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+const (
+ EventTypeDelete = mvccpb.DELETE
+ EventTypePut = mvccpb.PUT
+
+ closeSendErrTimeout = 250 * time.Millisecond
+)
+
+type Event mvccpb.Event
+
+type WatchChan <-chan WatchResponse
+
+type Watcher interface {
+ Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+ // RequestProgress requests a progress notify response be sent in all watch channels.
+ RequestProgress(ctx context.Context) error
+ // Close closes the watcher and cancels all watch requests.
+ Close() error
+}
+
+type WatchResponse struct {
+ Header pb.ResponseHeader
+ Events []*Event
+
+ // CompactRevision is the minimum revision the watcher may receive.
+ CompactRevision int64
+
+ CreatedRevision int64
+
+ // Canceled is used to indicate watch failure.
+ // If the watch failed and the stream was about to close, before the channel is closed,
+ // the channel sends a final response that has Canceled set to true with a non-nil Err().
+ Canceled bool
+
+ // Created is used to indicate the creation of the watcher.
+ Created bool
+
+ closeErr error
+
+ // cancelReason is a reason of canceling watch
+ cancelReason string
+}
+
+// IsCreate returns true if the event tells that the key is newly created.
+func (e *Event) IsCreate() bool {
+ return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+// IsModify returns true if the event tells that a new value is put on existing key.
+func (e *Event) IsModify() bool {
+ return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
+}
+
+// Err is the error value if this WatchResponse holds an error.
+func (wr *WatchResponse) Err() error {
+ switch {
+ case wr.closeErr != nil:
+ return v3rpc.Error(wr.closeErr)
+ case wr.CompactRevision != 0:
+ return v3rpc.ErrCompacted
+ case wr.Canceled:
+ if len(wr.cancelReason) != 0 {
+ return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
+ }
+ return v3rpc.ErrFutureRev
+ }
+ return nil
+}
+
+// IsProgressNotify returns true if the WatchResponse is progress notification.
+func (wr *WatchResponse) IsProgressNotify() bool {
+ return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+}
+
+type watcher struct {
+	remote   pb.WatchClient              // client used to talk to the etcd backend
+	callOpts []grpc.CallOption           //
+	mu       sync.Mutex                  //
+	streams  map[string]*watchGrpcStream // holds all active gRPC streams keyed by ctx value
+	lg       *zap.Logger                 //
+}
+
+// watchGrpcStream tracks all watch resources attached to a single grpc stream.
+type watchGrpcStream struct {
+ owner *watcher
+ remote pb.WatchClient
+ callOpts []grpc.CallOption
+ ctx context.Context // remote.Watch requests
+	ctxKey     string                   // ctxKey is used to look up the stream's context
+	cancel     context.CancelFunc
+	substreams map[int64]*watcherStream // holds all active watchers on this grpc stream
+	resuming   []*watcherStream         // holds all watchers on this grpc stream that are being resumed
+	reqc       chan watchStreamRequest  // reqc sends watch requests from Watch() to the main goroutine
+	respc      chan *pb.WatchResponse   // respc receives data from the watch client
+	donec      chan struct{}            // donec tells the broadcast goroutine to exit
+	errc       chan error
+	closingc   chan *watcherStream      // closingc gets the watcherStream of closing watchers
+	wg         sync.WaitGroup           // wg is done when all substream goroutines have exited
+	resumec    chan struct{}            // resumec closes to signal that all substreams should begin resuming
+	closeErr   error                    // closeErr is the error that closed the watch stream
+ lg *zap.Logger
+}
+
+// watchStreamRequest is a union of the supported watch request operation types
+type watchStreamRequest interface {
+ toPB() *pb.WatchRequest
+}
+
+type watchRequest struct {
+ ctx context.Context
+ key string
+ end string
+ rev int64
+	createdNotify  bool // send a created notification event if this field is true
+	progressNotify bool // request progress update notifications
+	fragment       bool // whether to fragment the response when the data is large
+ filters []pb.WatchCreateRequest_FilterType
+ prevKV bool
+ retc chan chan WatchResponse
+}
+
+// progressRequest is issued by the subscriber to request watch progress
+type progressRequest struct{}
+
+// watcherStream represents a registered watcher.
+// It is constructed together with the watchGrpcStream on Watch(); it wraps a single watch RPC request and holds
+// the subscribed key, the channel that notifies key changes, and a few important flags.
+type watcherStream struct {
+	initReq watchRequest        // initReq is the request that initiated this stream
+	outc    chan WatchResponse  // outc publishes watch responses to the subscriber
+	recvc   chan *WatchResponse // recvc buffers watch responses before publishing
+	donec   chan struct{}       // donec closes when the watcherStream goroutine stops
+	closing bool                // closing is set to true when the stream should be scheduled to close
+	id      int64               // id is the watch id registered on the grpc stream
+	buf     []*WatchResponse    // buf holds all events received from etcd but not yet consumed by the client
+}
+
+func NewWatcher(c *Client) Watcher {
+ return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
+}
+
+// NewWatchFromWatchClient builds a Watcher from an already-connected watch client.
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
+ w := &watcher{
+ remote: wc,
+ streams: make(map[string]*watchGrpcStream),
+ }
+ if c != nil {
+ w.callOpts = c.callOpts
+ w.lg = c.lg
+ }
+ return w
+}
+
+// never closes
+var valCtxCh = make(chan struct{})
+var zeroTime = time.Unix(0, 0)
+
+// ctx with only the values; never Done
+type valCtx struct{ context.Context }
+
+func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
+func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
+func (vc *valCtx) Err() error { return nil }
+
+// newWatcherGrpcStream establishes the streaming gRPC call with the backend; requests are consumed by serverWatchStream.recvLoop().
+func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
+ ctx, cancel := context.WithCancel(&valCtx{inctx})
+ wgs := &watchGrpcStream{
+ owner: w,
+ remote: w.remote,
+ callOpts: w.callOpts,
+ ctx: ctx,
+ ctxKey: streamKeyFromCtx(inctx),
+ cancel: cancel,
+ substreams: make(map[int64]*watcherStream),
+ respc: make(chan *pb.WatchResponse),
+ reqc: make(chan watchStreamRequest),
+ donec: make(chan struct{}),
+ errc: make(chan error, 1),
+ closingc: make(chan *watcherStream),
+ resumec: make(chan struct{}),
+ lg: w.lg,
+ }
+ go wgs.run()
+ return wgs
+}
+
+// Watch submits a watch request and waits for the response channel.
+func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
+	ow := opWatch(key, opts...) // validate the watch request
+
+ var filters []pb.WatchCreateRequest_FilterType
+ if ow.filterPut {
+ filters = append(filters, pb.WatchCreateRequest_NOPUT)
+ }
+ if ow.filterDelete {
+ filters = append(filters, pb.WatchCreateRequest_NODELETE)
+ }
+
+ wr := &watchRequest{
+ ctx: ctx,
+ createdNotify: ow.createdNotify,
+ key: ow.key,
+ end: ow.end,
+ rev: ow.rev,
+ progressNotify: ow.progressNotify,
+ fragment: ow.fragment,
+ filters: filters,
+ prevKV: ow.prevKV,
+ retc: make(chan chan WatchResponse, 1),
+ }
+
+ ok := false
+ ctxKey := streamKeyFromCtx(ctx) // map[hasleader:[true]]
+
+ var closeCh chan WatchResponse
+ for {
+		// find or allocate the appropriate gRPC watch stream (connections are reused)
+		w.mu.Lock()
+		if w.streams == nil { // the watcher has been closed
+ // closed
+ w.mu.Unlock()
+ ch := make(chan WatchResponse)
+ close(ch)
+ return ch
+ }
+		// streams is a map holding all active grpc streams keyed by ctx value;
+		// if there is no stream for this request yet, create one
+		wgs := w.streams[ctxKey]
+		if wgs == nil {
+			// newWatcherGrpcStream creates a new watch grpc stream to carry the watch request
+			// and starts a goroutine to handle the watch events for the watched key
+			wgs = w.newWatcherGrpcStream(ctx) // the client side of the watch stream
+ w.streams[ctxKey] = wgs
+ }
+ donec := wgs.donec
+ reqc := wgs.reqc
+ w.mu.Unlock()
+
+ // couldn't create channel; return closed channel
+ if closeCh == nil {
+ closeCh = make(chan WatchResponse, 1)
+ }
+
+ // submit request
+ select {
+		case reqc <- wr: // reqc sends the watch request from Watch() to the main goroutine
+ ok = true
+ case <-wr.ctx.Done():
+ ok = false
+ case <-donec:
+ ok = false
+ if wgs.closeErr != nil {
+ closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+ break
+ }
+ // retry; may have dropped stream from no ctxs
+ continue
+ }
+
+ // receive channel
+ if ok {
+ select {
+ case ret := <-wr.retc:
+ return ret
+ case <-ctx.Done():
+ case <-donec:
+ if wgs.closeErr != nil {
+ closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+ break
+ }
+ // retry; may have dropped stream from no ctxs
+ continue
+ }
+ }
+ break
+ }
+
+ close(closeCh)
+ return closeCh
+}
+
+func (w *watcher) Close() (err error) {
+ w.mu.Lock()
+ streams := w.streams
+ w.streams = nil
+ w.mu.Unlock()
+ for _, wgs := range streams {
+ if werr := wgs.close(); werr != nil {
+ err = werr
+ }
+ }
+ // Consider context.Canceled as a successful close
+ if err == context.Canceled {
+ err = nil
+ }
+ return err
+}
+
+// RequestProgress requests a progress notify response be sent in all watch channels.
+func (w *watcher) RequestProgress(ctx context.Context) (err error) {
+ ctxKey := streamKeyFromCtx(ctx)
+
+ w.mu.Lock()
+ if w.streams == nil {
+ w.mu.Unlock()
+ return fmt.Errorf("no stream found for context")
+ }
+ wgs := w.streams[ctxKey]
+ if wgs == nil {
+		wgs = w.newWatcherGrpcStream(ctx) // the client establishes a watch stream
+ w.streams[ctxKey] = wgs
+ }
+ donec := wgs.donec
+ reqc := wgs.reqc
+ w.mu.Unlock()
+
+ pr := &progressRequest{}
+
+ select {
+ case reqc <- pr:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-donec:
+ if wgs.closeErr != nil {
+ return wgs.closeErr
+ }
+ // retry; may have dropped stream from no ctxs
+ return w.RequestProgress(ctx)
+ }
+}
+
+func (w *watchGrpcStream) close() (err error) {
+ w.cancel()
+ <-w.donec
+ select {
+ case err = <-w.errc:
+ default:
+ }
+ return toErr(w.ctx, err)
+}
+
+func (w *watcher) closeStream(wgs *watchGrpcStream) {
+ w.mu.Lock()
+ close(wgs.donec)
+ wgs.cancel()
+ if w.streams != nil {
+ delete(w.streams, wgs.ctxKey)
+ }
+ w.mu.Unlock()
+}
+
+func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
+ // check watch ID for backward compatibility (<= v3.3)
+ if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
+ w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
+ // failed; no channel
+ close(ws.recvc)
+ return
+ }
+ ws.id = resp.WatchId
+ w.substreams[ws.id] = ws
+}
+
+func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
+ select {
+ case ws.outc <- *resp:
+ case <-ws.initReq.ctx.Done():
+ case <-time.After(closeSendErrTimeout):
+ }
+ close(ws.outc)
+}
+
+func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
+ // send channel response in case stream was never established
+ select {
+ case ws.initReq.retc <- ws.outc:
+ default:
+ }
+ // close subscriber's channel
+ if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
+ go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
+ } else if ws.outc != nil {
+ close(ws.outc)
+ }
+ if ws.id != -1 {
+ delete(w.substreams, ws.id)
+ return
+ }
+ for i := range w.resuming {
+ if w.resuming[i] == ws {
+ w.resuming[i] = nil
+ return
+ }
+ }
+}
+
+// run manages the watch client.
+func (w *watchGrpcStream) run() {
+ var wc pb.Watch_WatchClient
+ var closeErr error
+
+	// substreams marked to close but whose goroutine is still running; needed to avoid double-closing recvc on grpc stream teardown
+ closing := make(map[*watcherStream]struct{})
+
+ defer func() {
+ w.closeErr = closeErr
+		// shut down substreams and resuming substreams
+ for _, ws := range w.substreams {
+ if _, ok := closing[ws]; !ok {
+ close(ws.recvc)
+ closing[ws] = struct{}{}
+ }
+ }
+ for _, ws := range w.resuming {
+ if _, ok := closing[ws]; ws != nil && !ok {
+ close(ws.recvc)
+ closing[ws] = struct{}{}
+ }
+ }
+ w.joinSubstreams()
+ for range closing {
+ w.closeSubstream(<-w.closingc)
+ }
+ w.wg.Wait()
+ w.owner.closeStream(w)
+ }()
+
+	// open the grpc stream with etcd
+ if wc, closeErr = w.newWatchClient(); closeErr != nil {
+ return
+ }
+
+ cancelSet := make(map[int64]struct{})
+
+ var cur *pb.WatchResponse
+ for {
+ select {
+ // Watch() requested
+ case req := <-w.reqc:
+ switch wreq := req.(type) {
+ case *watchRequest:
+ outc := make(chan WatchResponse, 1)
+ // TODO: pass custom watch ID?
+ ws := &watcherStream{
+ initReq: *wreq,
+ id: -1,
+ outc: outc,
+ // unbuffered so resumes won't cause repeat events
+ recvc: make(chan *WatchResponse),
+ }
+
+ ws.donec = make(chan struct{})
+ w.wg.Add(1)
+ go w.serveSubstream(ws, w.resumec)
+
+ // queue up for watcher creation/resume
+ w.resuming = append(w.resuming, ws)
+ if len(w.resuming) == 1 {
+ // head of resume queue, can register a new watcher
+ if err := wc.Send(ws.initReq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+ case *progressRequest:
+ if err := wc.Send(wreq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+
+		case pbresp := <-w.respc: // new events from the watch client
+ if cur == nil || pbresp.Created || pbresp.Canceled {
+ cur = pbresp
+ } else if cur != nil && cur.WatchId == pbresp.WatchId {
+				// merge new events
+ cur.Events = append(cur.Events, pbresp.Events...)
+ // update "Fragment" field; last response with "Fragment" == false
+ cur.Fragment = pbresp.Fragment
+ }
+
+ switch {
+			case pbresp.Created: // this is the response to a create request
+ // response to head of queue creation
+ if len(w.resuming) != 0 {
+ if ws := w.resuming[0]; ws != nil {
+ w.addSubstream(pbresp, ws)
+ w.dispatchEvent(pbresp)
+ w.resuming[0] = nil
+ }
+ }
+
+ if ws := w.nextResume(); ws != nil {
+ if err := wc.Send(ws.initReq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+
+ // reset for next iteration
+ cur = nil
+
+ case pbresp.Canceled && pbresp.CompactRevision == 0:
+ delete(cancelSet, pbresp.WatchId)
+ if ws, ok := w.substreams[pbresp.WatchId]; ok {
+ // signal to stream goroutine to update closingc
+ close(ws.recvc)
+ closing[ws] = struct{}{}
+ }
+
+ // reset for next iteration
+ cur = nil
+
+			case cur.Fragment: // responses are streamed, so fragmented transfers are supported; keep accumulating and skip fragment events until the last piece arrives
+ continue
+
+ default:
+ // dispatch to appropriate watch stream
+ ok := w.dispatchEvent(cur)
+
+ // reset for next iteration
+ cur = nil
+
+ if ok {
+ break
+ }
+
+ // watch response on unexpected watch id; cancel id
+ if _, ok := cancelSet[pbresp.WatchId]; ok {
+ break
+ }
+
+ cancelSet[pbresp.WatchId] = struct{}{}
+ cr := &pb.WatchRequest_CancelRequest{
+ CancelRequest: &pb.WatchCancelRequest{
+ WatchId: pbresp.WatchId,
+ },
+ }
+ req := &pb.WatchRequest{WatchRequest_CancelRequest: cr}
+ w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
+ if err := wc.Send(req); err != nil {
+ w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
+ }
+ }
+
+		// the watch client Recv failed; spawn another watch client if possible and
+		// retry sending the watch request
+
+ case err := <-w.errc:
+ if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
+ closeErr = err
+ return
+ }
+ if wc, closeErr = w.newWatchClient(); closeErr != nil {
+ return
+ }
+ if ws := w.nextResume(); ws != nil {
+ if err := wc.Send(ws.initReq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+ cancelSet = make(map[int64]struct{})
+
+ case <-w.ctx.Done():
+ return
+
+ case ws := <-w.closingc:
+ w.closeSubstream(ws)
+ delete(closing, ws)
+ // no more watchers on this stream, shutdown, skip cancellation
+ if len(w.substreams)+len(w.resuming) == 0 {
+ return
+ }
+ if ws.id != -1 {
+ // client is closing an established watch; close it on the etcd proactively instead of waiting
+ // to close when the next message arrives
+ cancelSet[ws.id] = struct{}{}
+ cr := &pb.WatchRequest_CancelRequest{
+ CancelRequest: &pb.WatchCancelRequest{
+ WatchId: ws.id,
+ },
+ }
+ req := &pb.WatchRequest{
+ WatchRequest_CancelRequest: cr,
+ }
+ w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
+ if err := wc.Send(req); err != nil {
+ w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
+ }
+ }
+ }
+ }
+}
+
+// nextResume chooses the next resuming to register with the grpc stream. Abandoned
+// streams are marked as nil in the queue since the head must wait for its inflight registration.
+func (w *watchGrpcStream) nextResume() *watcherStream {
+ for len(w.resuming) != 0 {
+ if w.resuming[0] != nil {
+ return w.resuming[0]
+ }
+ w.resuming = w.resuming[1:len(w.resuming)]
+ }
+ return nil
+}
+
+// dispatchEvent sends a WatchResponse to the appropriate watcher stream
+func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
+ events := make([]*Event, len(pbresp.Events))
+ for i, ev := range pbresp.Events {
+ events[i] = (*Event)(ev)
+ }
+ // TODO: return watch ID?
+ wr := &WatchResponse{
+ Header: *pbresp.Header,
+ Events: events,
+ CompactRevision: pbresp.CompactRevision,
+ Created: pbresp.Created,
+ Canceled: pbresp.Canceled,
+ cancelReason: pbresp.CancelReason,
+ }
+
+ // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
+ // indicate they should be broadcast.
+ if wr.IsProgressNotify() && pbresp.WatchId == -1 {
+ return w.broadcastResponse(wr)
+ }
+
+ return w.unicastResponse(wr, pbresp.WatchId)
+}
+
+// broadcastResponse send a watch response to all watch substreams.
+func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
+ for _, ws := range w.substreams {
+ select {
+ case ws.recvc <- wr:
+ case <-ws.donec:
+ }
+ }
+ return true
+}
+
+// unicastResponse sends a watch response to a specific watch substream.
+func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
+ ws, ok := w.substreams[watchId]
+ if !ok {
+ return false
+ }
+ select {
+ case ws.recvc <- wr:
+ case <-ws.donec:
+ return false
+ }
+ return true
+}
+
+// serveWatchClient forwards messages from the grpc stream to run()
+func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
+ for {
+ resp, err := wc.Recv()
+ if err != nil {
+ select {
+ case w.errc <- err:
+ case <-w.donec:
+ }
+ return
+ }
+ select {
+ case w.respc <- resp:
+ case <-w.donec:
+ return
+ }
+ }
+}
+
+// serveSubstream forwards watch responses from run() to the subscriber
+func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
+ if ws.closing {
+ panic("created substream goroutine but substream is closing")
+ }
+
+ // nextRev is the minimum expected next revision
+ nextRev := ws.initReq.rev
+ resuming := false
+ defer func() {
+ if !resuming {
+ ws.closing = true
+ }
+ close(ws.donec)
+ if !resuming {
+ w.closingc <- ws
+ }
+ w.wg.Done()
+ }()
+
+ emptyWr := &WatchResponse{}
+ for {
+ curWr := emptyWr
+ outc := ws.outc
+
+ if len(ws.buf) > 0 {
+ curWr = ws.buf[0]
+ } else {
+ outc = nil
+ }
+ select {
+ case outc <- *curWr:
+ if ws.buf[0].Err() != nil {
+ return
+ }
+ ws.buf[0] = nil
+ ws.buf = ws.buf[1:]
+ case wr, ok := <-ws.recvc:
+ if !ok {
+ // shutdown from closeSubstream
+ return
+ }
+
+ if wr.Created {
+ if ws.initReq.retc != nil {
+ ws.initReq.retc <- ws.outc
+ // to prevent next write from taking the slot in buffered channel
+ // and posting duplicate create events
+ ws.initReq.retc = nil
+
+ // send first creation event only if requested
+ if ws.initReq.createdNotify {
+ ws.outc <- *wr
+ }
+ // once the watch channel is returned, a current revision
+ // watch must resume at the store revision. This is necessary
+ // for the following case to work as expected:
+ // wch := m1.Watch("a")
+ // m2.Put("a", "b")
+ // <-wch
+ // If the revision is only bound on the first observed event,
+ // if wch is disconnected before the Put is issued, then reconnects
+ // after it is committed, it'll miss the Put.
+ if ws.initReq.rev == 0 {
+ nextRev = wr.Header.Revision
+ }
+ }
+ } else {
+ // current progress of watch; <= store revision
+ nextRev = wr.Header.Revision
+ }
+
+ if len(wr.Events) > 0 {
+ nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
+ }
+ ws.initReq.rev = nextRev
+
+ // created event is already sent above,
+ // watcher should not post duplicate events
+ if wr.Created {
+ continue
+ }
+
+ // TODO pause channel if buffer gets too large
+ ws.buf = append(ws.buf, wr)
+ case <-w.ctx.Done():
+ return
+ case <-ws.initReq.ctx.Done():
+ return
+ case <-resumec:
+ resuming = true
+ return
+ }
+ }
+ // lazily send cancel message if events on missing id
+}
+
+func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
+	// mark all substreams as resuming
+ close(w.resumec)
+ w.resumec = make(chan struct{})
+ w.joinSubstreams()
+ for _, ws := range w.substreams {
+ ws.id = -1
+ w.resuming = append(w.resuming, ws)
+ }
+ // strip out nils, if any
+ var resuming []*watcherStream
+ for _, ws := range w.resuming {
+ if ws != nil {
+ resuming = append(resuming, ws)
+ }
+ }
+ w.resuming = resuming
+ w.substreams = make(map[int64]*watcherStream)
+
+ // connect to grpc stream while accepting watcher cancelation
+ stopc := make(chan struct{})
+ donec := w.waitCancelSubstreams(stopc)
+ wc, err := w.openWatchClient()
+ close(stopc)
+ <-donec
+
+ // serve all non-closing streams, even if there's a client error
+ // so that the teardown path can shutdown the streams as expected.
+ for _, ws := range w.resuming {
+ if ws.closing {
+ continue
+ }
+ ws.donec = make(chan struct{})
+ w.wg.Add(1)
+ go w.serveSubstream(ws, w.resumec)
+ }
+
+ if err != nil {
+ return nil, v3rpc.Error(err)
+ }
+
+ // receive data from new grpc stream
+ go w.serveWatchClient(wc)
+ return wc, nil
+}
+
+func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+ var wg sync.WaitGroup
+ wg.Add(len(w.resuming))
+ donec := make(chan struct{})
+ for i := range w.resuming {
+ go func(ws *watcherStream) {
+ defer wg.Done()
+ if ws.closing {
+ if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+ close(ws.outc)
+ ws.outc = nil
+ }
+ return
+ }
+ select {
+ case <-ws.initReq.ctx.Done():
+ // closed ws will be removed from resuming
+ ws.closing = true
+ close(ws.outc)
+ ws.outc = nil
+ w.wg.Add(1)
+ go func() {
+ defer w.wg.Done()
+ w.closingc <- ws
+ }()
+ case <-stopc:
+ }
+ }(w.resuming[i])
+ }
+ go func() {
+ defer close(donec)
+ wg.Wait()
+ }()
+ return donec
+}
+
+// joinSubstreams waits for all substream goroutines to complete.
+func (w *watchGrpcStream) joinSubstreams() {
+ for _, ws := range w.substreams {
+ <-ws.donec
+ }
+ for _, ws := range w.resuming {
+ if ws != nil {
+ <-ws.donec
+ }
+ }
+}
+
+var maxBackoff = 100 * time.Millisecond
+
+// openWatchClient retries opening a watch client until success or halt.
+// manually retry in case "ws==nil && err==nil"
+// TODO: remove FailFast=false
+func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+ backoff := time.Millisecond
+ for {
+ select {
+ case <-w.ctx.Done():
+ if err == nil {
+ return nil, w.ctx.Err()
+ }
+ return nil, err
+ default:
+ }
+ if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
+ break
+ }
+ if isHaltErr(w.ctx, err) {
+ return nil, v3rpc.Error(err)
+ }
+ if isUnavailableErr(w.ctx, err) {
+ // retry, but backoff
+ if backoff < maxBackoff {
+ // 25% backoff factor
+ backoff = backoff + backoff/4
+ if backoff > maxBackoff {
+ backoff = maxBackoff
+ }
+ }
+ time.Sleep(backoff)
+ }
+ }
+ return ws, nil
+}
+
+// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
+func (wr *watchRequest) toPB() *pb.WatchRequest {
+ req := &pb.WatchCreateRequest{
+ StartRevision: wr.rev,
+ Key: string([]byte(wr.key)),
+ RangeEnd: string([]byte(wr.end)),
+ ProgressNotify: wr.progressNotify,
+ Filters: wr.filters,
+ PrevKv: wr.prevKV,
+ Fragment: wr.fragment,
+ }
+ cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
+ return &pb.WatchRequest{WatchRequest_CreateRequest: cr}
+}
+
+// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
+func (pr *progressRequest) toPB() *pb.WatchRequest {
+ req := &pb.WatchProgressRequest{}
+ cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
+ return &pb.WatchRequest{WatchRequest_ProgressRequest: cr}
+}
+
+// streamKeyFromCtx converts ctx metadata into a string key.
+func streamKeyFromCtx(ctx context.Context) string {
+ if md, ok := metadata.FromOutgoingContext(ctx); ok {
+ return fmt.Sprintf("%+v", md)
+ }
+ return ""
+}
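
An end-to-end sketch of the consumer side of the machinery above: Watch returns a channel, and every WatchResponse should be checked with Err() because compaction or cancellation arrives as a final response on that same channel. The endpoint and key prefix are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// watch all keys under "a/" starting from the current revision
	for wresp := range cli.Watch(ctx, "a/", clientv3.WithPrefix()) {
		if err := wresp.Err(); err != nil {
			log.Fatalf("watch channel closed: %v", err) // e.g. ErrCompacted
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %s -> %s (mod rev %d)\n",
				ev.Type, ev.Kv.Key, ev.Kv.Value, ev.Kv.ModRevision)
		}
	}
}
```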
diff --git a/client_sdk/v3/yaml/config.go b/client_sdk/v3/yaml/config.go
new file mode 100644
index 00000000000..480d00da408
--- /dev/null
+++ b/client_sdk/v3/yaml/config.go
@@ -0,0 +1,91 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml handles yaml-formatted clientv3 configuration data.
+package yaml
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "io/ioutil"
+
+ "sigs.k8s.io/yaml"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/tlsutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3"
+)
+
+type yamlConfig struct {
+ clientv3.Config
+
+ InsecureTransport bool `json:"insecure-transport"`
+ InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
+ Certfile string `json:"cert-file"`
+ Keyfile string `json:"key-file"`
+ TrustedCAfile string `json:"trusted-ca-file"`
+
+ // CAfile is being deprecated. Use 'TrustedCAfile' instead.
+ // TODO: deprecate this in v4
+ CAfile string `json:"ca-file"`
+}
+
+// NewConfig creates a new clientv3.Config from a yaml file.
+func NewConfig(fpath string) (*clientv3.Config, error) {
+ b, err := ioutil.ReadFile(fpath)
+ if err != nil {
+ return nil, err
+ }
+
+ yc := &yamlConfig{}
+
+ err = yaml.Unmarshal(b, yc)
+ if err != nil {
+ return nil, err
+ }
+
+ if yc.InsecureTransport {
+ return &yc.Config, nil
+ }
+
+ var (
+ cert *tls.Certificate
+ cp *x509.CertPool
+ )
+
+ if yc.Certfile != "" && yc.Keyfile != "" {
+ cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if yc.TrustedCAfile != "" {
+ cp, err = tlsutil.NewCertPool([]string{yc.TrustedCAfile})
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ tlscfg := &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ InsecureSkipVerify: yc.InsecureSkipTLSVerify,
+ RootCAs: cp,
+ }
+ if cert != nil {
+ tlscfg.Certificates = []tls.Certificate{*cert}
+ }
+ yc.Config.TLS = tlscfg
+
+ return &yc.Config, nil
+}
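
A minimal usage sketch for NewConfig above. The yaml path and its field values are placeholders; the fields map to the json tags in yamlConfig (insecure-transport, cert-file, key-file, trusted-ca-file) plus the embedded clientv3.Config fields.

```go
package main

import (
	"context"
	"log"
	"time"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
	clientyaml "github.com/ls-2018/etcd_cn/client_sdk/v3/yaml"
)

func main() {
	// client.yaml might contain (illustrative values only):
	//   endpoints: ["https://127.0.0.1:2379"]
	//   insecure-transport: false
	//   cert-file: client.crt
	//   key-file: client.key
	//   trusted-ca-file: ca.crt
	cfg, err := clientyaml.NewConfig("client.yaml")
	if err != nil {
		log.Fatal(err)
	}
	cli, err := clientv3.New(*cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if _, err := cli.Get(ctx, "health-check-key"); err != nil {
		log.Fatal(err)
	}
}
```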
diff --git a/code-of-conduct.md b/code-of-conduct.md
deleted file mode 100644
index d79cc5488d3..00000000000
--- a/code-of-conduct.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## etcd Community Code of Conduct
-
-etcd follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/code_debug/conf/conf.go b/code_debug/conf/conf.go
new file mode 100644
index 00000000000..7263e271ff8
--- /dev/null
+++ b/code_debug/conf/conf.go
@@ -0,0 +1,3 @@
+package conf
+
+var Perf = false
diff --git a/code_debug/conn/addr.go b/code_debug/conn/addr.go
new file mode 100644
index 00000000000..12be0d43587
--- /dev/null
+++ b/code_debug/conn/addr.go
@@ -0,0 +1,72 @@
+package conn
+
+import (
+ "fmt"
+ "net"
+ "os/exec"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/code_debug/conf"
+)
+
+func PrintConn(line string, c net.Conn) {
+ if conf.Perf {
+ return
+ }
+
+ _, port, _ := net.SplitHostPort(c.RemoteAddr().String())
+ res := SubCommand([]string{"zsh", "-c", fmt.Sprintf("lsof -itcp -n|grep '%s->'", port)})
+ if res == "" {
+ return
+ }
+ pid := SubCommand([]string{"zsh", "-c", fmt.Sprintf("echo '%s'| awk '{print $2}'", res)})
+ if pid == "" {
+ return
+ }
+ commandRes := SubCommand([]string{"zsh", "-c", fmt.Sprintf("ps -ef|grep -v grep|grep -v zsh |grep '%s'", pid)})
+ if commandRes == "" {
+ return
+ }
+ command := SubCommand([]string{"zsh", "-c", fmt.Sprintf("echo '%s'| awk '{$1=$2=$3=$4=$5=$6=$7=\"\"; print $0}'", commandRes)})
+ if command == "" {
+ return
+ }
+ pr := fmt.Sprintf("%s RemoteAddr:%s-->localAddr:%s [%s]", line, c.RemoteAddr().String(), c.LocalAddr().String(), strings.Trim(command, " \n"))
+ Green(pr)
+}
+
+func SubCommand(opt []string) (result string) {
+ cmd := exec.Command(opt[0], opt[1:]...)
+	// connect the command's stderr and stdout to the same pipe
+ stdout, err := cmd.StdoutPipe()
+ cmd.Stderr = cmd.Stdout
+ if err != nil {
+ panic(err)
+ }
+ if err = cmd.Start(); err != nil {
+ panic(err)
+ }
+ for {
+ tmp := make([]byte, 1024)
+ _, err := stdout.Read(tmp)
+ res := strings.Split(string(tmp), "\n")
+ for _, v := range res {
+ if len(v) > 0 && v[0] != '\u0000' {
+ // fmt.Println(v)
+ result += v
+ // log.Debug(v)
+ }
+ }
+ if err != nil {
+ break
+ }
+ }
+ return result
+ //if err = cmd.Wait(); err != nil {
+ // log.Fatal(err)
+ //}
+}
+
+func Green(pr string) {
+ fmt.Println(fmt.Sprintf("\033[1;32;42m %s \033[0m", pr))
+}
diff --git a/code_debug/db/main.go b/code_debug/db/main.go
new file mode 100644
index 00000000000..16685d8f6a0
--- /dev/null
+++ b/code_debug/db/main.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+ "log"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+func main() {
+ f()
+}
+
+func f() error {
+	// open the boltdb file and get the db handle
+ db, err := bolt.Open("db", 0o600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+	// true starts a read-write transaction, false a read-only one
+ tx, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+	// use the transaction to create the "key" bucket
+ b, err := tx.CreateBucketIfNotExists([]byte("key"))
+ if err != nil {
+ return err
+ }
+	// use the bucket to update a key
+ if err := b.Put([]byte("r94"), []byte("world")); err != nil {
+ return err
+ }
+	// commit the transaction
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ return nil
+}
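
A companion sketch to the write path above: reading the key back with a read-only transaction via db.View. The same file name and bucket are assumed.

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// View runs the function inside a read-only transaction
	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("key"))
		if b == nil {
			return fmt.Errorf("bucket %q not found", "key")
		}
		fmt.Printf("r94 = %s\n", b.Get([]byte("r94"))) // expect "world"
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```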
diff --git a/code_debug/host/host.go b/code_debug/host/host.go
new file mode 100644
index 00000000000..ec4c8f292dc
--- /dev/null
+++ b/code_debug/host/host.go
@@ -0,0 +1,17 @@
+package main
+
+import (
+ "fmt"
+ "net"
+
+ "go.uber.org/zap"
+)
+
+func main() {
+ fmt.Println(net.ParseIP("www.baidu.com"))
+ fmt.Println(net.ParseIP("127.168.1.2"))
+ //
+ // 127.168.1.2
+ zap.NewNop().Debug("asdasdasdasd")
+ zap.NewNop().Fatal("asdasdasdasd")
+}
diff --git a/code_debug/init.sh b/code_debug/init.sh
new file mode 100644
index 00000000000..fbe28809807
--- /dev/null
+++ b/code_debug/init.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+etcdctl put a 1
+etcdctl put b 1
+etcdctl put b1 1
+etcdctl put b2 1
+etcdctl put b3 1
+etcdctl put c 1
diff --git a/code_debug/main.go b/code_debug/main.go
new file mode 100644
index 00000000000..860f8c9ef6b
--- /dev/null
+++ b/code_debug/main.go
@@ -0,0 +1,104 @@
+package main
+
+import (
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ math_bits "math/bits"
+ "net"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+func mai2n() {
+ fmt.Println(net.ParseIP("http://127.0.0.1:8080"))
+ fmt.Println(net.ParseIP("127.0.0.1:8080"))
+ fmt.Println(net.ParseIP("www.baidu.com"))
+ fmt.Println(strconv.Atoi("12h"))
+ fmt.Println(strconv.FormatUint(uint64(123456), 16))
+ var ch chan int
+ ch = nil
+ select {
+ case a := <-ch:
+ fmt.Println("<-ch", a)
+ default:
+
+ }
+ var x uint64
+ x = 1
+ fmt.Println((math_bits.Len64(x|1) + 6) / 7)
+ fmt.Println("over")
+ hash := md5.New()
+ hash.Write([]byte("hello"))
+ fmt.Println(fmt.Sprintf("%x", hash.Sum(nil)))
+
+ a := `{"header":{"ID":7587861231285799685},"put":{"key":"YQ==","value":"Yg=="}}`
+ b := `{"ID":7587861231285799684,"Method":"PUT","Path":"/0/version","Val":"3.5.0","Dir":false,"PrevValue":"","PrevIndex":0,"Expiration":0,"Wait":false,"Since":0,"Recursive":false,"Sorted":false,"Quorum":false,"Time":0,"Stream":false}`
+ fmt.Println(json.Unmarshal([]byte(a), &etcdserverpb.InternalRaftRequest{}))
+ fmt.Println(json.Unmarshal([]byte(b), &etcdserverpb.InternalRaftRequest{}))
+ var c time.Time
+ fmt.Println(c.IsZero())
+ var d JointConfig
+ fmt.Println(d[1]["a"])
+}
+
+type (
+ Config map[string]string
+ JointConfig [2]Config
+)
+
+func main3() {
+ fmt.Println(strings.Compare("a", "b"))
+ fmt.Println(strings.Compare("a", "a"))
+ fmt.Println(strings.Compare("b", "ab"))
+ a := []*A{
+ {Key: "a"},
+ {Key: "b"},
+ {Key: "c"},
+ {Key: "d"},
+ }
+ sort.Sort(permSlice(a))
+ for _, i := range a {
+ fmt.Println(i.Key)
+ }
+
+	// search within the existing, sorted permissions
+ idx := sort.Search(len(a), func(i int) bool {
+ // a,a 0
+ // a b -1
+ // b a 1
+ // a,b,c,d,e
+ // c
+ return strings.Compare(a[i].Key, "gc") >= 0
+ })
+ fmt.Println(idx)
+}
+
+type A struct {
+ Key string
+}
+
+type permSlice []*A
+
+func (perms permSlice) Len() int {
+ return len(perms)
+}
+
+func (perms permSlice) Less(i, j int) bool {
+ // a,a 0
+ // a b -1
+ // b a 1
+
+ return strings.Compare(perms[i].Key, perms[j].Key) < 0
+}
+
+func (perms permSlice) Swap(i, j int) {
+ perms[i], perms[j] = perms[j], perms[i]
+}
+
+func main() {
+}
diff --git a/code_debug/txn/main.go b/code_debug/txn/main.go
new file mode 100644
index 00000000000..d301ef0bb2f
--- /dev/null
+++ b/code_debug/txn/main.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "log"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+)
+
+func main() {
+ endpoints := []string{"127.0.0.1:2379"}
+ cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints})
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer cli.Close()
+
+	// create a lease
+ lease := clientv3.NewLease(cli)
+
+	// set the lease TTL
+	leaseResp, err := lease.Grant(context.TODO(), 10) // seconds
+ if err != nil {
+ fmt.Printf("设置租约时间失败:%s\n", err.Error())
+ }
+ _, err = cli.Put(context.Background(), "a", "x", clientv3.WithLease(leaseResp.ID))
+ if err != nil {
+ log.Fatal(err)
+ }
+ resp, err := cli.Txn(context.TODO()).If(
+ clientv3.Compare(clientv3.LeaseValue("a"), "=", leaseResp.ID),
+ ).Then(
+ clientv3.OpPut("b", "v30"),
+ ).Commit()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, rp := range resp.Responses {
+ res := rp.GetResponseRange()
+ if res == nil {
+ continue
+ }
+ for _, ev := range res.Kvs {
+ fmt.Printf("%s -> %s, create revision = %d\n",
+ ev.Key,
+ ev.Value,
+ ev.CreateRevision)
+ }
+ }
+}
diff --git a/codecov.yml b/codecov.yml
deleted file mode 100644
index 5dfc7b9b934..00000000000
--- a/codecov.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-codecov:
- token: "6040de41-c073-4d6f-bbf8-d89256ef31e1"
- disable_default_path_fixes: true
-
-fixes:
- - "go.etcd.io/etcd/api/v3/::api/"
- - "go.etcd.io/etcd/client/v3/::client/v3/"
- - "go.etcd.io/etcd/client/v2/::client/v2/"
- - "go.etcd.io/etcd/etcdctl/v3/::etcdctl/"
- - "go.etcd.io/etcd/pkg/v3/::pkg/"
- - "go.etcd.io/etcd/server/v3/::server/"
-
-ignore:
- - "**/*.pb.go"
- - "**/*.pb.gw.go"
- - "tests/**/*"
- - "go.etcd.io/etcd/tests/**/*"
diff --git a/config.ini b/config.ini
new file mode 100644
index 00000000000..e27c543aaea
--- /dev/null
+++ b/config.ini
@@ -0,0 +1,17 @@
+# List of all opts:
+# port
+# debug
+# offset
+# should_check_version
+# open_browser
+# open_neat_window
+
+# Port for website
+port=:500
+debug=false
+# number of records on a single screen
+offset=100
+should_check_version=true
+open_browser=true
+# has effect only if 'open browser' is true
+open_neat_window=true
diff --git a/contrib/README.md b/contrib/README.md
deleted file mode 100644
index 33af884c1f1..00000000000
--- a/contrib/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-## Contrib
-
-Scripts and files which may be useful but aren't part of the core etcd project.
-
-* [systemd](systemd) - an example unit file for deploying etcd on systemd-based distributions
-* [raftexample](raftexample) - an example distributed key-value store using raft
-* [systemd/etcd3-multinode](systemd/etcd3-multinode) - multi-node cluster setup with systemd
diff --git a/contrib/lock/README.md b/contrib/lock/README.md
deleted file mode 100644
index d33630e25fa..00000000000
--- a/contrib/lock/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# What is this?
-This directory provides an executable example of the scenarios described in [the article by Martin Kleppmann][fencing].
-
-Generally speaking, a lease-based lock service cannot provide mutual exclusion to processes. This is because such a lease mechanism depends on the physical clock of both the lock service and client processes. Many factors (e.g. stop-the-world GC pause of a language runtime) can cause false expiration of a granted lease as depicted in the below figure: ![unsafe lock][unsafe-lock]
-
-As discussed in [notes on the usage of lock and lease][why], such a problem can be solved with a technique called version number validation or fencing tokens. With this technique a shared resource (storage in the figures) needs to validate requests from clients based on their tokens like this: ![fencing tokens][fencing-tokens]
-
-This directory contains two programs: `client` and `storage`. With `etcd`, you can reproduce the expired lease problem of distributed locking and a simple example solution of the validation technique which can avoid incorrect access from a client with an expired lease.
-
-`storage` works as a very simple key value in-memory store which is accessible through HTTP and a custom JSON protocol. `client` works as client processes which tries to write a key/value to `storage` with coordination of etcd locking.
-
-## How to build
-
-For building `client` and `storage`, just execute `go build` in each directory.
-
-## How to try
-
-At first, you need to start an etcd cluster, which works as lock service in the figures. On top of the etcd source directory, execute commands like below:
-```
-$ make # build etcd
-$ bin/etcd # start etcd
-```
-
-Then run `storage` command in `storage` directory:
-```
-$ ./storage
-```
-
-Now client processes ("Client 1" and "Client 2" in the figures) can be started. At first, execute below command for starting a client process which corresponds to "Client 1":
-```
-$ ./client 1
-```
-It will show an output like this:
-```
-client 1 starts
-created etcd client and session
-acquired lock, version: 694d82254d5fa305
-please manually revoke the lease using 'etcdctl lease revoke 694d82254d5fa305' or wait for it to expire, then start executing client 2 and hit any key...
-```
-
-Verify the lease was created using:
-```
-$ bin/etcdctl lease list
-found 1 leases
-694d82254d5fa305
-```
-
-Then proceed to manually revoke the lease using:
-```
-$ bin/etcdctl lease revoke 694d82254d5fa305
-lease 694d82254d5fa305 revoked
-```
-
-Now another client process can be started like this:
-```
-$ ./client 2
-client 2 starts
-created etcd client and session
-acquired lock, version: 694d82254e18770a
-this is client 2, continuing
-```
-If things go well the second client process invoked as `./client 2` finishes soon. It successfully writes a key to `storage` process.
-
-After checking this, please hit any key for `./client 1` and resume the process. It will show an output like below:
-```
-resuming client 1
-expected fail to write to storage with old lease version: error: given version (694d82254d5fa305) is different from the existing version (694d82254e18770a)
-```
-
-[fencing]: https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html
-[fencing-tokens]: https://martin.kleppmann.com/2016/02/fencing-tokens.png
-[unsafe-lock]: https://martin.kleppmann.com/2016/02/unsafe-lock.png
-[why]: https://etcd.io/docs/next/learning/why/#notes-on-the-usage-of-lock-and-lease
diff --git a/contrib/lock/client/.gitignore b/contrib/lock/client/.gitignore
deleted file mode 100644
index 2a11f8b9558..00000000000
--- a/contrib/lock/client/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-client
\ No newline at end of file
diff --git a/contrib/lock/client/client.go b/contrib/lock/client/client.go
deleted file mode 100644
index 066b70e51d2..00000000000
--- a/contrib/lock/client/client.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// An example distributed locking with fencing in the case of etcd
-// Based on https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html
-
-package main
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "log"
- "net/http"
- "os"
- "strconv"
- "time"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
-)
-
-type request struct {
- Op string `json:"op"`
- Key string `json:"key"`
- Val string `json:"val"`
- Version int64 `json:"version"`
-}
-
-type response struct {
- Val string `json:"val"`
- Version int64 `json:"version"`
- Err string `json:"err"`
-}
-
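-// write sends a "write" request for key/value, tagged with the lease version
-// used as a fencing token, to the storage server and returns an error if
-// storage rejects the request.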
-func write(key string, value string, version int64) error {
- req := request{
- Op: "write",
- Key: key,
- Val: value,
- Version: version,
- }
-
- reqBytes, err := json.Marshal(&req)
- if err != nil {
- log.Fatalf("failed to marshal request: %s", err)
- }
-
- httpResp, err := http.Post("http://localhost:8080", "application/json", bytes.NewReader(reqBytes))
- if err != nil {
- log.Fatalf("failed to send a request to storage: %s", err)
- }
-
- respBytes, err := io.ReadAll(httpResp.Body)
- if err != nil {
- log.Fatalf("failed to read request body: %s", err)
- }
-
- resp := new(response)
- err = json.Unmarshal(respBytes, resp)
- if err != nil {
- log.Fatalf("failed to unmarshal response json: %s", err)
- }
-
- if resp.Err != "" {
- return fmt.Errorf("error: %s", resp.Err)
- }
-
- return nil
-}
-
-func main() {
- if len(os.Args) != 2 {
- log.Fatalf("usage: %s <1 or 2>", os.Args[0])
- }
-
- mode, err := strconv.Atoi(os.Args[1])
- if err != nil || mode != 1 && mode != 2 {
- log.Fatalf("mode should be 1 or 2 (given value is %s)", os.Args[1])
- }
-
- log.Printf("client %d starts\n", mode)
-
- client, err := clientv3.New(clientv3.Config{
- Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:22379", "http://127.0.0.1:32379"},
- })
- if err != nil {
- log.Fatalf("failed to create an etcd client: %s", err)
- }
-
- // do a connection check first, otherwise it will hang infinitely on newSession
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- _, err = client.MemberList(ctx)
- if err != nil {
- log.Fatalf("failed to reach etcd: %s", err)
- }
-
- session, err := concurrency.NewSession(client, concurrency.WithTTL(1))
- if err != nil {
- log.Fatalf("failed to create a session: %s", err)
- }
-
- log.Print("created etcd client and session")
-
- locker := concurrency.NewLocker(session, "/lock")
- locker.Lock()
- defer locker.Unlock()
- version := session.Lease()
- log.Printf("acquired lock, version: %x", version)
-
- if mode == 1 {
- log.Printf("please manually revoke the lease using 'etcdctl lease revoke %x' or wait for it to expire, then start executing client 2 and hit any key...", version)
- reader := bufio.NewReader(os.Stdin)
- _, _ = reader.ReadByte()
- log.Print("resuming client 1")
- } else {
- log.Print("this is client 2, continuing\n")
- }
-
- err = write("key0", fmt.Sprintf("value from client %x", mode), int64(version))
- if err != nil {
- if mode == 1 {
- log.Printf("expected fail to write to storage with old lease version: %s\n", err) // client 1 should show this message
- } else {
- log.Fatalf("unexpected fail to write to storage: %s\n", err)
- }
- } else {
- log.Printf("successfully write a key to storage using lease %x\n", int64(version))
- }
-}
diff --git a/contrib/lock/storage/.gitignore b/contrib/lock/storage/.gitignore
deleted file mode 100644
index 5d252d7c9fb..00000000000
--- a/contrib/lock/storage/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-storage
\ No newline at end of file
diff --git a/contrib/lock/storage/storage.go b/contrib/lock/storage/storage.go
deleted file mode 100644
index 7e39e38f62d..00000000000
--- a/contrib/lock/storage/storage.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "os"
- "strings"
-)
-
-type value struct {
- val string
- version int64
-}
-
-var data = make(map[string]*value)
-
-type request struct {
- Op string `json:"op"`
- Key string `json:"key"`
- Val string `json:"val"`
- Version int64 `json:"version"`
-}
-
-type response struct {
- Val string `json:"val"`
- Version int64 `json:"version"`
- Err string `json:"err"`
-}
-
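-// writeResponse marshals resp as JSON and writes it to w, exiting the
-// process if either step fails.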
-func writeResponse(resp response, w http.ResponseWriter) {
- wBytes, err := json.Marshal(resp)
- if err != nil {
- fmt.Printf("failed to marshal json: %s\n", err)
- os.Exit(1)
- }
- _, err = w.Write(wBytes)
- if err != nil {
- fmt.Printf("failed to write a response: %s\n", err)
- os.Exit(1)
- }
-}
-
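-// handler decodes one JSON request per call and dispatches on its op:
-// "read" looks the key up, "write" stores it subject to the version
-// check, and anything else is logged and ignored.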
-func handler(w http.ResponseWriter, r *http.Request) {
- rBytes, err := io.ReadAll(r.Body)
- if err != nil {
- fmt.Printf("failed to read http request: %s\n", err)
- os.Exit(1)
- }
-
- var req request
- err = json.Unmarshal(rBytes, &req)
- if err != nil {
- fmt.Printf("failed to unmarshal json: %s\n", err)
- os.Exit(1)
- }
-
- if strings.Compare(req.Op, "read") == 0 {
- if val, ok := data[req.Key]; ok {
- writeResponse(response{val.val, val.version, ""}, w)
- } else {
- writeResponse(response{"", -1, "key not found"}, w)
- }
- } else if strings.Compare(req.Op, "write") == 0 {
- if val, ok := data[req.Key]; ok {
- if req.Version != val.version {
- writeResponse(response{"", -1, fmt.Sprintf("given version (%x) is different from the existing version (%x)", req.Version, val.version)}, w)
- } else {
- data[req.Key].val = req.Val
- data[req.Key].version = req.Version
- writeResponse(response{req.Val, req.Version, ""}, w)
- }
- } else {
- data[req.Key] = &value{req.Val, req.Version}
- writeResponse(response{req.Val, req.Version, ""}, w)
- }
- } else {
- fmt.Printf("unknown op: %s\n", escape(req.Op))
- return
- }
-}
-
-func escape(s string) string {
- escaped := strings.ReplaceAll(s, "\n", " ")
- escaped = strings.ReplaceAll(escaped, "\r", " ")
- return escaped
-}
-
-func main() {
- http.HandleFunc("/", handler)
- err := http.ListenAndServe(":8080", nil)
- if err != nil {
- fmt.Printf("failed to listen and serve: %s\n", err)
- os.Exit(1)
- }
-}
diff --git a/contrib/mixin/Makefile b/contrib/mixin/Makefile
deleted file mode 100644
index 843215b00c4..00000000000
--- a/contrib/mixin/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-.PHONY: tools manifests test clean
-
-OS := linux
-ARCH ?= amd64
-PROMETHEUS_VERSION := 2.33.1
-
-tools:
- go install github.com/google/go-jsonnet/cmd/jsonnet@latest
- go install github.com/brancz/gojsontoyaml@latest
- wget -qO- "https://github.com/prometheus/prometheus/releases/download/v${PROMETHEUS_VERSION}/prometheus-${PROMETHEUS_VERSION}.${OS}-${ARCH}.tar.gz" |\
- tar xvz --strip-components=1 -C "$$(go env GOPATH)/bin" prometheus-${PROMETHEUS_VERSION}.${OS}-${ARCH}/promtool
-
-manifests: manifests/etcd-prometheusRules.yaml
-
-manifests/etcd-prometheusRules.yaml:
- mkdir -p manifests
- jsonnet -e '(import "mixin.libsonnet").prometheusAlerts' | gojsontoyaml > manifests/etcd-prometheusRules.yaml
-
-test: manifests/etcd-prometheusRules.yaml
- promtool test rules test.yaml
-
-clean:
- rm -rf manifests/*.yaml
diff --git a/contrib/mixin/README.md b/contrib/mixin/README.md
deleted file mode 100644
index 2ec70004cda..00000000000
--- a/contrib/mixin/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Prometheus Monitoring Mixin for etcd
-
-> NOTE: This project is in *alpha* stage. Flags, configuration, behaviour and design may change significantly in future releases.
-
-A set of customisable Prometheus alerts for etcd.
-
-Instructions for use are the same as the [kubernetes-mixin](https://github.com/kubernetes-monitoring/kubernetes-mixin).
-
-## Background
-
-* For more information about monitoring mixins, see this [design doc](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/edit#).
-
-## Testing alerts
-
-Make sure to have [jsonnet](https://jsonnet.org/) and [gojsontoyaml](https://github.com/brancz/gojsontoyaml) installed. You can install them (along with promtool) via
-
-```
-make tools
-```
-
-First, compile the mixin to a YAML file, which promtool will read:
-```
-make manifests
-```
-
-Then run the unit test:
-```
-promtool test rules test.yaml
-```
diff --git a/contrib/mixin/mixin.libsonnet b/contrib/mixin/mixin.libsonnet
deleted file mode 100644
index f220eab56de..00000000000
--- a/contrib/mixin/mixin.libsonnet
+++ /dev/null
@@ -1,1445 +0,0 @@
-{
- _config+:: {
- etcd_selector: 'job=~".*etcd.*"',
- // etcd_instance_labels are the label names that uniquely
- // identify an instance and need to be aggregated away for alerts
- // that are about an etcd cluster as a whole. For example, if etcd
- // instances are deployed on K8s, you will likely want to change
- // this to 'instance, pod'.
- etcd_instance_labels: 'instance',
- // scrape_interval_seconds is the global scrape interval which can be
- // used to dynamically adjust rate windows as a function of the interval.
- scrape_interval_seconds: 30,
- // Dashboard variable refresh option on Grafana (https://grafana.com/docs/grafana/latest/datasources/prometheus/).
- // 0 : Never (never refresh the dashboard variable values)
- // 1 : On Dashboard Load (refresh the dashboard variables when the dashboard is loaded)
- // 2 : On Time Range Change (refresh the dashboard variables when the time range changes)
- dashboard_var_refresh: 2,
- // clusterLabel is used to identify a cluster.
- clusterLabel: 'job',
- },
-
- prometheusAlerts+:: {
- groups+: [
- {
- name: 'etcd',
- rules: [
- {
- alert: 'etcdMembersDown',
- expr: |||
- max without (endpoint) (
- sum without (%(etcd_instance_labels)s) (up{%(etcd_selector)s} == bool 0)
- or
- count without (To) (
- sum without (%(etcd_instance_labels)s) (rate(etcd_network_peer_sent_failures_total{%(etcd_selector)s}[%(network_failure_range)ss])) > 0.01
- )
- )
- > 0
- ||| % { etcd_instance_labels: $._config.etcd_instance_labels, etcd_selector: $._config.etcd_selector, network_failure_range: $._config.scrape_interval_seconds * 4 },
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": members are down ({{ $value }}).' % $._config.clusterLabel,
- summary: 'etcd cluster members are down.',
- },
- },
- {
- alert: 'etcdInsufficientMembers',
- expr: |||
- sum(up{%(etcd_selector)s} == bool 1) without (%(etcd_instance_labels)s) < ((count(up{%(etcd_selector)s}) without (%(etcd_instance_labels)s) + 1) / 2)
- ||| % $._config,
- 'for': '3m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": insufficient members ({{ $value }}).' % $._config.clusterLabel,
- summary: 'etcd cluster has insufficient number of members.',
- },
- },
- {
- alert: 'etcdNoLeader',
- expr: |||
- etcd_server_has_leader{%(etcd_selector)s} == 0
- ||| % $._config,
- 'for': '1m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": member {{ $labels.instance }} has no leader.' % $._config.clusterLabel,
- summary: 'etcd cluster has no leader.',
- },
- },
- {
- alert: 'etcdHighNumberOfLeaderChanges',
- expr: |||
- increase((max without (%(etcd_instance_labels)s) (etcd_server_leader_changes_seen_total{%(etcd_selector)s}) or 0*absent(etcd_server_leader_changes_seen_total{%(etcd_selector)s}))[15m:1m]) >= 4
- ||| % $._config,
- 'for': '5m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": {{ $value }} leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.' % $._config.clusterLabel,
- summary: 'etcd cluster has high number of leader changes.',
- },
- },
- {
- alert: 'etcdHighNumberOfFailedGRPCRequests',
- expr: |||
- 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code)
- /
- sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code)
- > 1
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": {{ $value }}%% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
- summary: 'etcd cluster has high number of failed grpc requests.',
- },
- },
- {
- alert: 'etcdHighNumberOfFailedGRPCRequests',
- expr: |||
- 100 * sum(rate(grpc_server_handled_total{%(etcd_selector)s, grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code)
- /
- sum(rate(grpc_server_handled_total{%(etcd_selector)s}[5m])) without (grpc_type, grpc_code)
- > 5
- ||| % $._config,
- 'for': '5m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": {{ $value }}%% of requests for {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
- summary: 'etcd cluster has high number of failed grpc requests.',
- },
- },
- {
- alert: 'etcdGRPCRequestsSlow',
- expr: |||
- histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{%(etcd_selector)s, grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type))
- > 0.15
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": 99th percentile of gRPC requests is {{ $value }}s on etcd instance {{ $labels.instance }} for {{ $labels.grpc_method }} method.' % $._config.clusterLabel,
- summary: 'etcd grpc requests are slow',
- },
- },
- {
- alert: 'etcdMemberCommunicationSlow',
- expr: |||
- histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{%(etcd_selector)s}[5m]))
- > 0.15
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": member communication with {{ $labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
- summary: 'etcd cluster member communication is slow.',
- },
- },
- {
- alert: 'etcdHighNumberOfFailedProposals',
- expr: |||
- rate(etcd_server_proposals_failed_total{%(etcd_selector)s}[15m]) > 5
- ||| % $._config,
- 'for': '15m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": {{ $value }} proposal failures within the last 30 minutes on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
- summary: 'etcd cluster has high number of proposal failures.',
- },
- },
- {
- alert: 'etcdHighFsyncDurations',
- expr: |||
- histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m]))
- > 0.5
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
- summary: 'etcd cluster 99th percentile fsync durations are too high.',
- },
- },
- {
- alert: 'etcdHighFsyncDurations',
- expr: |||
- histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{%(etcd_selector)s}[5m]))
- > 1
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": 99th percentile fsync durations are {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
- summary: 'etcd cluster 99th percentile fsync durations are too high.',
- },
- },
- {
- alert: 'etcdHighCommitDurations',
- expr: |||
- histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{%(etcd_selector)s}[5m]))
- > 0.25
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": 99th percentile commit durations {{ $value }}s on etcd instance {{ $labels.instance }}.' % $._config.clusterLabel,
- summary: 'etcd cluster 99th percentile commit durations are too high.',
- },
- },
- {
- alert: 'etcdDatabaseQuotaLowSpace',
- expr: |||
- (last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'critical',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": database size exceeds the defined quota on etcd instance {{ $labels.instance }}, please defrag or increase the quota as the writes to etcd will be disabled when it is full.' % $._config.clusterLabel,
- summary: 'etcd cluster database is running full.',
- },
- },
- {
- alert: 'etcdExcessiveDatabaseGrowth',
- expr: |||
- predict_linear(etcd_mvcc_db_total_size_in_bytes[4h], 4*60*60) > etcd_server_quota_backend_bytes
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance {{ $labels.instance }}, please check as it might be disruptive.' % $._config.clusterLabel,
- summary: 'etcd cluster database growing very fast.',
- },
- },
- {
- alert: 'etcdDatabaseHighFragmentationRatio',
- expr: |||
- (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5
- ||| % $._config,
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- annotations: {
- description: 'etcd cluster "{{ $labels.%s }}": database size in use on instance {{ $labels.instance }} is {{ $value | humanizePercentage }} of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.' % $._config.clusterLabel,
- summary: 'etcd database size in use is less than 50% of the actual allocated storage.',
- runbook_url: 'https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation',
- },
- },
- ],
- },
- ],
- },
-
- grafanaDashboards+:: {
- 'etcd.json': {
- uid: std.md5('etcd.json'),
- title: 'etcd',
- description: 'etcd sample Grafana dashboard with Prometheus',
- tags: ['etcd-mixin'],
- style: 'dark',
- timezone: 'browser',
- editable: true,
- hideControls: false,
- sharedCrosshair: false,
- rows: [
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- cacheTimeout: null,
- colorBackground: false,
- colorValue: false,
- colors: [
- 'rgba(245, 54, 54, 0.9)',
- 'rgba(237, 129, 40, 0.89)',
- 'rgba(50, 172, 45, 0.97)',
- ],
- datasource: '$datasource',
- editable: true,
- 'error': false,
- format: 'none',
- gauge: {
- maxValue: 100,
- minValue: 0,
- show: false,
- thresholdLabels: false,
- thresholdMarkers: true,
- },
- id: 28,
- interval: null,
- isNew: true,
- links: [],
- mappingType: 1,
- mappingTypes: [
- {
- name: 'value to text',
- value: 1,
- },
- {
- name: 'range to text',
- value: 2,
- },
- ],
- maxDataPoints: 100,
- nullPointMode: 'connected',
- nullText: null,
- postfix: '',
- postfixFontSize: '50%',
- prefix: '',
- prefixFontSize: '50%',
- rangeMaps: [{
- from: 'null',
- text: 'N/A',
- to: 'null',
- }],
- span: 3,
- sparkline: {
- fillColor: 'rgba(31, 118, 189, 0.18)',
- full: false,
- lineColor: 'rgb(31, 120, 193)',
- show: false,
- },
- targets: [{
- expr: 'sum(etcd_server_has_leader{%s="$cluster"})' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: '',
- metric: 'etcd_server_has_leader',
- refId: 'A',
- step: 20,
- }],
- thresholds: '',
- title: 'Up',
- type: 'singlestat',
- valueFontSize: '200%',
- valueMaps: [{
- op: '=',
- text: 'N/A',
- value: 'null',
- }],
- valueName: 'avg',
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 23,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 5,
- stack: false,
- steppedLine: false,
- targets: [
- {
- expr: 'sum(rate(grpc_server_started_total{%s="$cluster",grpc_type="unary"}[$__rate_interval]))' % $._config.clusterLabel,
- format: 'time_series',
- intervalFactor: 2,
- legendFormat: 'RPC Rate',
- metric: 'grpc_server_started_total',
- refId: 'A',
- step: 2,
- },
- {
- expr: 'sum(rate(grpc_server_handled_total{%s="$cluster",grpc_type="unary",grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[$__rate_interval]))' % $._config.clusterLabel,
- format: 'time_series',
- intervalFactor: 2,
- legendFormat: 'RPC Failed Rate',
- metric: 'grpc_server_handled_total',
- refId: 'B',
- step: 2,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'RPC Rate',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'ops',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 41,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: true,
- steppedLine: false,
- targets: [
- {
- expr: 'sum(grpc_server_started_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})' % $._config,
- intervalFactor: 2,
- legendFormat: 'Watch Streams',
- metric: 'grpc_server_handled_total',
- refId: 'A',
- step: 4,
- },
- {
- expr: 'sum(grpc_server_started_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{%(clusterLabel)s="$cluster",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})' % $._config,
- intervalFactor: 2,
- legendFormat: 'Lease Streams',
- metric: 'grpc_server_handled_total',
- refId: 'B',
- step: 4,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Active Streams',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'short',
- label: '',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- ],
- showTitle: false,
- title: 'Row',
- },
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- decimals: null,
- editable: true,
- 'error': false,
- fill: 0,
- grid: {},
- id: 1,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'etcd_mvcc_db_total_size_in_bytes{%s="$cluster"}' % $._config.clusterLabel,
- hide: false,
- interval: '',
- intervalFactor: 2,
- legendFormat: '{{instance}} DB Size',
- metric: '',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'DB Size',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'cumulative',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'bytes',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- logBase: 1,
- max: null,
- min: null,
- show: false,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- grid: {},
- id: 3,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 1,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: false,
- steppedLine: true,
- targets: [
- {
- expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{%s="$cluster"}[$__rate_interval])) by (instance, le))' % $._config.clusterLabel,
- hide: false,
- intervalFactor: 2,
- legendFormat: '{{instance}} WAL fsync',
- metric: 'etcd_disk_wal_fsync_duration_seconds_bucket',
- refId: 'A',
- step: 4,
- },
- {
- expr: 'histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{%s="$cluster"}[$__rate_interval])) by (instance, le))' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: '{{instance}} DB fsync',
- metric: 'etcd_disk_backend_commit_duration_seconds_bucket',
- refId: 'B',
- step: 4,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Disk Sync Duration',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'cumulative',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 's',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- logBase: 1,
- max: null,
- min: null,
- show: false,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 29,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 4,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'process_resident_memory_bytes{%s="$cluster"}' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: '{{instance}} Resident Memory',
- metric: 'process_resident_memory_bytes',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Memory',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'bytes',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- ],
- title: 'New row',
- },
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 5,
- id: 22,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: true,
- steppedLine: false,
- targets: [{
- expr: 'rate(etcd_network_client_grpc_received_bytes_total{%s="$cluster"}[$__rate_interval])' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: '{{instance}} Client Traffic In',
- metric: 'etcd_network_client_grpc_received_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Client Traffic In',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 5,
- id: 21,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: true,
- steppedLine: false,
- targets: [{
- expr: 'rate(etcd_network_client_grpc_sent_bytes_total{%s="$cluster"}[$__rate_interval])' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: '{{instance}} Client Traffic Out',
- metric: 'etcd_network_client_grpc_sent_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Client Traffic Out',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 20,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'sum(rate(etcd_network_peer_received_bytes_total{%s="$cluster"}[$__rate_interval])) by (instance)' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: '{{instance}} Peer Traffic In',
- metric: 'etcd_network_peer_received_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Peer Traffic In',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- decimals: null,
- editable: true,
- 'error': false,
- fill: 0,
- grid: {},
- id: 16,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 3,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'sum(rate(etcd_network_peer_sent_bytes_total{%s="$cluster"}[$__rate_interval])) by (instance)' % $._config.clusterLabel,
- hide: false,
- interval: '',
- intervalFactor: 2,
- legendFormat: '{{instance}} Peer Traffic Out',
- metric: 'etcd_network_peer_sent_bytes_total',
- refId: 'A',
- step: 4,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Peer Traffic Out',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'cumulative',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'Bps',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- ],
- title: 'New row',
- },
- {
- collapse: false,
- editable: true,
- height: '250px',
- panels: [
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- editable: true,
- 'error': false,
- fill: 0,
- id: 40,
- isNew: true,
- legend: {
- avg: false,
- current: false,
- max: false,
- min: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 6,
- stack: false,
- steppedLine: false,
- targets: [
- {
- expr: 'sum(rate(etcd_server_proposals_failed_total{%s="$cluster"}[$__rate_interval]))' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: 'Proposal Failure Rate',
- metric: 'etcd_server_proposals_failed_total',
- refId: 'A',
- step: 2,
- },
- {
- expr: 'sum(etcd_server_proposals_pending{%s="$cluster"})' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: 'Proposal Pending Total',
- metric: 'etcd_server_proposals_pending',
- refId: 'B',
- step: 2,
- },
- {
- expr: 'sum(rate(etcd_server_proposals_committed_total{%s="$cluster"}[$__rate_interval]))' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: 'Proposal Commit Rate',
- metric: 'etcd_server_proposals_committed_total',
- refId: 'C',
- step: 2,
- },
- {
- expr: 'sum(rate(etcd_server_proposals_applied_total{%s="$cluster"}[$__rate_interval]))' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: 'Proposal Apply Rate',
- refId: 'D',
- step: 2,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Raft Proposals',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'short',
- label: '',
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- datasource: '$datasource',
- decimals: 0,
- editable: true,
- 'error': false,
- fill: 0,
- id: 19,
- isNew: true,
- legend: {
- alignAsTable: false,
- avg: false,
- current: false,
- max: false,
- min: false,
- rightSide: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- percentage: false,
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- span: 6,
- stack: false,
- steppedLine: false,
- targets: [{
- expr: 'changes(etcd_server_leader_changes_seen_total{%s="$cluster"}[1d])' % $._config.clusterLabel,
- intervalFactor: 2,
- legendFormat: '{{instance}} Total Leader Elections Per Day',
- metric: 'etcd_server_leader_changes_seen_total',
- refId: 'A',
- step: 2,
- }],
- thresholds: [],
- timeFrom: null,
- timeShift: null,
- title: 'Total Leader Elections Per Day',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- },
- {
- aliasColors: {},
- bars: false,
- dashLength: 10,
- dashes: false,
- datasource: '$datasource',
- decimals: 0,
- editable: true,
- 'error': false,
- fieldConfig: {
- defaults: {
- custom: {},
- },
- overrides: [],
- },
- fill: 0,
- fillGradient: 0,
- gridPos: {
- h: 7,
- w: 12,
- x: 0,
- y: 28,
- },
- hiddenSeries: false,
- id: 42,
- isNew: true,
- legend: {
- alignAsTable: false,
- avg: false,
- current: false,
- max: false,
- min: false,
- rightSide: false,
- show: false,
- total: false,
- values: false,
- },
- lines: true,
- linewidth: 2,
- links: [],
- nullPointMode: 'connected',
- options: {
- alertThreshold: true,
- },
- percentage: false,
- pluginVersion: '7.4.3',
- pointradius: 5,
- points: false,
- renderer: 'flot',
- seriesOverrides: [],
- spaceLength: 10,
- stack: false,
- steppedLine: false,
- targets: [
- {
- expr: 'histogram_quantile(0.99, sum by (instance, le) (rate(etcd_network_peer_round_trip_time_seconds_bucket{%s="$cluster"}[$__rate_interval])))' % $._config.clusterLabel,
- interval: '',
- intervalFactor: 2,
- legendFormat: '{{instance}} Peer round trip time',
- metric: 'etcd_network_peer_round_trip_time_seconds_bucket',
- refId: 'A',
- step: 2,
- },
- ],
- thresholds: [],
- timeFrom: null,
- timeRegions: [],
- timeShift: null,
- title: 'Peer round trip time',
- tooltip: {
- msResolution: false,
- shared: true,
- sort: 0,
- value_type: 'individual',
- },
- type: 'graph',
- xaxis: {
- buckets: null,
- mode: 'time',
- name: null,
- show: true,
- values: [],
- },
- yaxes: [
- {
- '$$hashKey': 'object:925',
- decimals: null,
- format: 's',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- {
- '$$hashKey': 'object:926',
- format: 'short',
- label: null,
- logBase: 1,
- max: null,
- min: null,
- show: true,
- },
- ],
- yaxis: {
- align: false,
- alignLevel: null,
- },
- },
- ],
- title: 'New row',
- },
- ],
- time: {
- from: 'now-15m',
- to: 'now',
- },
- timepicker: {
- now: true,
- refresh_intervals: [
- '5s',
- '10s',
- '30s',
- '1m',
- '5m',
- '15m',
- '30m',
- '1h',
- '2h',
- '1d',
- ],
- time_options: [
- '5m',
- '15m',
- '1h',
- '6h',
- '12h',
- '24h',
- '2d',
- '7d',
- '30d',
- ],
- },
- templating: {
- list: [
- {
- current: {
- text: 'Prometheus',
- value: 'Prometheus',
- },
- hide: 0,
- label: 'Data Source',
- name: 'datasource',
- options: [],
- query: 'prometheus',
- refresh: 1,
- regex: '',
- type: 'datasource',
- },
- {
- allValue: null,
- current: {
- text: 'prod',
- value: 'prod',
- },
- datasource: '$datasource',
- hide: 0,
- includeAll: false,
- label: 'cluster',
- multi: false,
- name: 'cluster',
- options: [],
- query: 'label_values(etcd_server_has_leader, %s)' % $._config.clusterLabel,
- refresh: $._config.dashboard_var_refresh,
- regex: '',
- sort: 2,
- tagValuesQuery: '',
- tags: [],
- tagsQuery: '',
- type: 'query',
- useTags: false,
- },
- ],
- },
- annotations: {
- list: [],
- },
- refresh: '10s',
- schemaVersion: 13,
- version: 215,
- links: [],
- gnetId: null,
- },
- },
-}
diff --git a/contrib/mixin/test.yaml b/contrib/mixin/test.yaml
deleted file mode 100644
index 8cf18a0e080..00000000000
--- a/contrib/mixin/test.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-rule_files:
- - manifests/etcd-prometheusRules.yaml
-
-evaluation_interval: 1m
-
-tests:
- - interval: 1m
- input_series:
- - series: 'up{job="etcd",instance="10.10.10.0"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.1"}'
- values: '1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.2"}'
- values: '1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0'
- alert_rule_test:
- - eval_time: 3m
- alertname: etcdInsufficientMembers
- - eval_time: 5m
- alertname: etcdInsufficientMembers
- - eval_time: 12m
- alertname: etcdMembersDown
- - eval_time: 14m
- alertname: etcdMembersDown
- exp_alerts:
- - exp_labels:
- job: etcd
- severity: critical
- exp_annotations:
- description: 'etcd cluster "etcd": members are down (3).'
- summary: 'etcd cluster members are down.'
- - eval_time: 7m
- alertname: etcdInsufficientMembers
- - eval_time: 11m
- alertname: etcdInsufficientMembers
- exp_alerts:
- - exp_labels:
- job: etcd
- severity: critical
- exp_annotations:
- description: 'etcd cluster "etcd": insufficient members (1).'
- summary: 'etcd cluster has insufficient number of members.'
- - eval_time: 15m
- alertname: etcdInsufficientMembers
- exp_alerts:
- - exp_labels:
- job: etcd
- severity: critical
- exp_annotations:
- description: 'etcd cluster "etcd": insufficient members (0).'
- summary: 'etcd cluster has insufficient number of members.'
-
- - interval: 1m
- input_series:
- - series: 'up{job="etcd",instance="10.10.10.0"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.1"}'
- values: '1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.2"}'
- values: '1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
- alert_rule_test:
- - eval_time: 14m
- alertname: etcdMembersDown
- exp_alerts:
- - exp_labels:
- job: etcd
- severity: critical
- exp_annotations:
- description: 'etcd cluster "etcd": members are down (3).'
- summary: 'etcd cluster members are down.'
-
- - interval: 1m
- input_series:
- - series: 'up{job="etcd",instance="10.10.10.0"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0'
- - series: 'up{job="etcd",instance="10.10.10.1"}'
- values: '1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0'
- - series: 'etcd_network_peer_sent_failures_total{To="member-1",job="etcd",endpoint="test"}'
- values: '0 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18'
- alert_rule_test:
- - eval_time: 13m
- alertname: etcdMembersDown
- exp_alerts:
- - exp_labels:
- job: etcd
- severity: critical
- exp_annotations:
- description: 'etcd cluster "etcd": members are down (1).'
- summary: 'etcd cluster members are down.'
-
- - interval: 1m
- input_series:
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}'
- values: '0 0 2 0 0 1 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}'
- values: '0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}'
- values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
- alert_rule_test:
- - eval_time: 10m
- alertname: etcdHighNumberOfLeaderChanges
- exp_alerts:
- - exp_labels:
- job: etcd
- severity: warning
- exp_annotations:
- description: 'etcd cluster "etcd": 4 leader changes within the last 15 minutes. Frequent elections may be a sign of insufficient resources, high network latency, or disruptions by other components and should be investigated.'
- summary: 'etcd cluster has high number of leader changes.'
- - interval: 1m
- input_series:
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.0"}'
- values: '0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.1"}'
- values: '0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0'
- - series: 'etcd_server_leader_changes_seen_total{job="etcd",instance="10.10.10.2"}'
- values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
- alert_rule_test:
- - eval_time: 10m
- alertname: etcdHighNumberOfLeaderChanges
- exp_alerts:
-
- - interval: 1m
- input_series:
- - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.0"}'
- values: '0+8192x240'
- - series: 'etcd_server_quota_backend_bytes{job="etcd",instance="10.10.10.0"}'
- values: '524288+0x240'
- - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.1"}'
- values: '0+1024x240'
- - series: 'etcd_server_quota_backend_bytes{job="etcd",instance="10.10.10.1"}'
- values: '524288+0x240'
- alert_rule_test:
- - eval_time: 11m
- alertname: etcdExcessiveDatabaseGrowth
- exp_alerts:
- - exp_labels:
- instance: '10.10.10.0'
- job: etcd
- severity: warning
- exp_annotations:
- description: 'etcd cluster "etcd": Predicting running out of disk space in the next four hours, based on write observations within the past four hours on etcd instance 10.10.10.0, please check as it might be disruptive.'
- summary: 'etcd cluster database growing very fast.'
-
- - interval: 1m
- input_series:
- - series: 'etcd_mvcc_db_total_size_in_use_in_bytes{job="etcd",instance="10.10.10.0"}'
- values: '30000+0x10'
- - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.0"}'
- values: '100000+0x10'
- - series: 'etcd_mvcc_db_total_size_in_use_in_bytes{job="etcd",instance="10.10.10.1"}'
- values: '70000+0x10'
- - series: 'etcd_mvcc_db_total_size_in_bytes{job="etcd",instance="10.10.10.1"}'
- values: '100000+0x10'
- alert_rule_test:
- - eval_time: 11m
- alertname: etcdDatabaseHighFragmentationRatio
- exp_alerts:
- - exp_labels:
- instance: '10.10.10.0'
- job: etcd
- severity: warning
- exp_annotations:
- description: 'etcd cluster "etcd": database size in use on instance 10.10.10.0 is 30% of the actual allocated disk space, please run defragmentation (e.g. etcdctl defrag) to retrieve the unused fragmented disk space.'
- runbook_url: https://etcd.io/docs/v3.5/op-guide/maintenance/#defragmentation
- summary: 'etcd database size in use is less than 50% of the actual allocated storage.'
diff --git a/contrib/raftexample/Procfile b/contrib/raftexample/Procfile
deleted file mode 100644
index f6e87132693..00000000000
--- a/contrib/raftexample/Procfile
+++ /dev/null
@@ -1,4 +0,0 @@
-# Use goreman to run this Procfile. Install it with `go install github.com/mattn/goreman@latest`
-raftexample1: ./raftexample --id 1 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 12380
-raftexample2: ./raftexample --id 2 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 22380
-raftexample3: ./raftexample --id 3 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 32380
diff --git a/contrib/raftexample/README.md b/contrib/raftexample/README.md
deleted file mode 100644
index 2e73996a6a6..00000000000
--- a/contrib/raftexample/README.md
+++ /dev/null
@@ -1,125 +0,0 @@
-# raftexample
-
-raftexample is an example usage of etcd's [raft library](../../raft). It provides a simple REST API for a key-value store cluster backed by the [Raft][raft] consensus algorithm.
-
-[raft]: http://raftconsensus.github.io/
-
-## Getting Started
-
-### Building raftexample
-
-Clone `etcd` to `<directory>/src/go.etcd.io/etcd`
-
-```sh
-export GOPATH=<directory>
-cd <directory>/src/go.etcd.io/etcd/contrib/raftexample
-go build -o raftexample
-```
-
-### Running single node raftexample
-
-First start a single-member cluster of raftexample:
-
-```sh
-raftexample --id 1 --cluster http://127.0.0.1:12379 --port 12380
-```
-
-Each raftexample process maintains a single raft instance and a key-value server.
-The process's comma-separated list of peers (--cluster), its raft ID (--id), which is also its index into the peer list, and the HTTP key-value server port (--port) are passed on the command line.
-
-Next, store a value ("hello") to a key ("my-key"):
-
-```
-curl -L http://127.0.0.1:12380/my-key -XPUT -d hello
-```
-
-Finally, retrieve the stored key:
-
-```
-curl -L http://127.0.0.1:12380/my-key
-```
-
-### Running a local cluster
-
-First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
-
-The [Procfile script](./Procfile) will set up a local example cluster. Start it with:
-
-```sh
-goreman start
-```
-
-This will bring up three raftexample instances.
-
-Now it's possible to write a key-value pair to any member of the cluster and likewise retrieve it from any member.
-
-### Fault Tolerance
-
-To test cluster recovery, first start a cluster and write a value "foo":
-```sh
-goreman start
-curl -L http://127.0.0.1:12380/my-key -XPUT -d foo
-```
-
-Next, remove a node and replace the value with "bar" to check cluster availability:
-
-```sh
-goreman run stop raftexample2
-curl -L http://127.0.0.1:12380/my-key -XPUT -d bar
-curl -L http://127.0.0.1:32380/my-key
-```
-
-Finally, bring the node back up and verify it recovers with the updated value "bar":
-```sh
-goreman run start raftexample2
-curl -L http://127.0.0.1:22380/my-key
-```
-
-### Dynamic cluster reconfiguration
-
-Nodes can be added to or removed from a running cluster using requests to the REST API.
-
-For example, suppose we have a 3-node cluster that was started with the commands:
-```sh
-raftexample --id 1 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 12380
-raftexample --id 2 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 22380
-raftexample --id 3 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379 --port 32380
-```
-
-A fourth node with ID 4 can be added by issuing a POST:
-```sh
-curl -L http://127.0.0.1:12380/4 -XPOST -d http://127.0.0.1:42379
-```
-
-Then the new node can be started as the others were, using the --join option:
-```sh
-raftexample --id 4 --cluster http://127.0.0.1:12379,http://127.0.0.1:22379,http://127.0.0.1:32379,http://127.0.0.1:42379 --port 42380 --join
-```
-
-The new node should join the cluster and be able to service key/value requests.
-
-We can remove a node using a DELETE request:
-```sh
-curl -L http://127.0.0.1:12380/3 -XDELETE
-```
-
-Node 3 should shut itself down once the cluster has processed this request.
-
-## Design
-
-raftexample consists of three components: a raft-backed key-value store, a REST API server, and a raft consensus server based on etcd's raft implementation.
-
-The raft-backed key-value store is a key-value map that holds all committed key-values.
-The store bridges communication between the raft server and the REST server.
-Key-value updates are issued through the store to the raft server.
-The store updates its map once raft reports the updates are committed.
-
-The REST server exposes the current raft consensus by accessing the raft-backed key-value store.
-A GET command looks up a key in the store and returns the value, if any.
-A key-value PUT command issues an update proposal to the store.
-
-The raft server participates in consensus with its cluster peers.
-When the REST server submits a proposal, the raft server transmits the proposal to its peers.
-When raft reaches a consensus, the server publishes all committed updates over a commit channel.
-For raftexample, this commit channel is consumed by the key-value store.
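-
-To make that flow concrete, here is a deliberately simplified, self-contained Go sketch of the propose → commit → apply loop. The channel names echo the real code, but the types and the stand-in "raft node" goroutine are invented for illustration; the actual implementation lives in this directory's sources (httpapi.go, kvstore.go and the raft node code).
-
-```go
-package main
-
-import (
-    "fmt"
-    "sync"
-)
-
-// entry is a stand-in for an encoded key-value proposal.
-type entry struct{ key, val string }
-
-// store mutates its map only when an entry arrives on the commit
-// channel, never directly from the proposer; this is the same rule the
-// real kvstore follows.
-type store struct {
-    mu   sync.RWMutex
-    data map[string]string
-}
-
-func (s *store) applyCommits(commitC <-chan entry, done chan<- struct{}) {
-    for e := range commitC {
-        s.mu.Lock()
-        s.data[e.key] = e.val
-        s.mu.Unlock()
-    }
-    close(done)
-}
-
-func main() {
-    proposeC := make(chan entry) // REST handler -> raft: proposed updates
-    commitC := make(chan entry)  // raft -> store: committed updates
-    done := make(chan struct{})
-
-    s := &store{data: map[string]string{}}
-    go s.applyCommits(commitC, done)
-
-    // Stand-in for the raft node: every proposal is committed in order.
-    go func() {
-        for e := range proposeC {
-            commitC <- e
-        }
-        close(commitC)
-    }()
-
-    proposeC <- entry{"my-key", "hello"} // what a PUT does via kvstore.Propose
-    close(proposeC)
-    <-done // all commits have been applied
-
-    s.mu.RLock()
-    fmt.Println(s.data["my-key"]) // "hello"
-    s.mu.RUnlock()
-}
-```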
-
diff --git a/contrib/raftexample/doc.go b/contrib/raftexample/doc.go
deleted file mode 100644
index b2dc8416037..00000000000
--- a/contrib/raftexample/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// raftexample is a simple KV store using the raft and rafthttp libraries.
-package main
diff --git a/contrib/raftexample/httpapi.go b/contrib/raftexample/httpapi.go
deleted file mode 100644
index dbe226add33..00000000000
--- a/contrib/raftexample/httpapi.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "io"
- "log"
- "net/http"
- "strconv"
-
- "go.etcd.io/raft/v3/raftpb"
-)
-
-// Handler for a http based key-value store backed by raft
-type httpKVAPI struct {
- store *kvstore
- confChangeC chan<- raftpb.ConfChange
-}
-
-func (h *httpKVAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- key := r.RequestURI
- defer r.Body.Close()
- switch r.Method {
- case http.MethodPut:
- v, err := io.ReadAll(r.Body)
- if err != nil {
- log.Printf("Failed to read on PUT (%v)\n", err)
- http.Error(w, "Failed on PUT", http.StatusBadRequest)
- return
- }
-
- h.store.Propose(key, string(v))
-
- // Optimistic-- no waiting for ack from raft. Value is not yet
- // committed so a subsequent GET on the key may return old value
- w.WriteHeader(http.StatusNoContent)
- case http.MethodGet:
- if v, ok := h.store.Lookup(key); ok {
- w.Write([]byte(v))
- } else {
- http.Error(w, "Failed to GET", http.StatusNotFound)
- }
- case http.MethodPost:
- url, err := io.ReadAll(r.Body)
- if err != nil {
- log.Printf("Failed to read on POST (%v)\n", err)
- http.Error(w, "Failed on POST", http.StatusBadRequest)
- return
- }
-
- nodeID, err := strconv.ParseUint(key[1:], 0, 64)
- if err != nil {
- log.Printf("Failed to convert ID for conf change (%v)\n", err)
- http.Error(w, "Failed on POST", http.StatusBadRequest)
- return
- }
-
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: nodeID,
- Context: url,
- }
- h.confChangeC <- cc
- // As above, optimistic that raft will apply the conf change
- w.WriteHeader(http.StatusNoContent)
- case http.MethodDelete:
- nodeID, err := strconv.ParseUint(key[1:], 0, 64)
- if err != nil {
- log.Printf("Failed to convert ID for conf change (%v)\n", err)
- http.Error(w, "Failed on DELETE", http.StatusBadRequest)
- return
- }
-
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeRemoveNode,
- NodeID: nodeID,
- }
- h.confChangeC <- cc
-
- // As above, optimistic that raft will apply the conf change
- w.WriteHeader(http.StatusNoContent)
- default:
- w.Header().Set("Allow", http.MethodPut)
- w.Header().Add("Allow", http.MethodGet)
- w.Header().Add("Allow", http.MethodPost)
- w.Header().Add("Allow", http.MethodDelete)
- http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
- }
-}
-
-// serveHTTPKVAPI starts a key-value server with a GET/PUT API and listens.
-func serveHTTPKVAPI(kv *kvstore, port int, confChangeC chan<- raftpb.ConfChange, errorC <-chan error) {
- srv := http.Server{
- Addr: ":" + strconv.Itoa(port),
- Handler: &httpKVAPI{
- store: kv,
- confChangeC: confChangeC,
- },
- }
- go func() {
- if err := srv.ListenAndServe(); err != nil {
- log.Fatal(err)
- }
- }()
-
- // exit when raft goes down
- if err, ok := <-errorC; ok {
- log.Fatal(err)
- }
-}
diff --git a/contrib/raftexample/kvstore.go b/contrib/raftexample/kvstore.go
deleted file mode 100644
index 22f8915fe1c..00000000000
--- a/contrib/raftexample/kvstore.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "bytes"
- "encoding/gob"
- "encoding/json"
- "log"
- "sync"
-
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/raft/v3/raftpb"
-)
-
-// a key-value store backed by raft
-type kvstore struct {
- proposeC chan<- string // channel for proposing updates
- mu sync.RWMutex
- kvStore map[string]string // current committed key-value pairs
- snapshotter *snap.Snapshotter
-}
-
-type kv struct {
- Key string
- Val string
-}
-
-func newKVStore(snapshotter *snap.Snapshotter, proposeC chan<- string, commitC <-chan *commit, errorC <-chan error) *kvstore {
- s := &kvstore{proposeC: proposeC, kvStore: make(map[string]string), snapshotter: snapshotter}
- snapshot, err := s.loadSnapshot()
- if err != nil {
- log.Panic(err)
- }
- if snapshot != nil {
- log.Printf("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index)
- if err := s.recoverFromSnapshot(snapshot.Data); err != nil {
- log.Panic(err)
- }
- }
- // read commits from raft into kvStore map until error
- go s.readCommits(commitC, errorC)
- return s
-}
-
-func (s *kvstore) Lookup(key string) (string, bool) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- v, ok := s.kvStore[key]
- return v, ok
-}
-
-func (s *kvstore) Propose(k string, v string) {
- var buf bytes.Buffer
- if err := gob.NewEncoder(&buf).Encode(kv{k, v}); err != nil {
- log.Fatal(err)
- }
- s.proposeC <- buf.String()
-}
-
-func (s *kvstore) readCommits(commitC <-chan *commit, errorC <-chan error) {
- for commit := range commitC {
- if commit == nil {
- // signaled to load snapshot
- snapshot, err := s.loadSnapshot()
- if err != nil {
- log.Panic(err)
- }
- if snapshot != nil {
- log.Printf("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index)
- if err := s.recoverFromSnapshot(snapshot.Data); err != nil {
- log.Panic(err)
- }
- }
- continue
- }
-
- for _, data := range commit.data {
- var dataKv kv
- dec := gob.NewDecoder(bytes.NewBufferString(data))
- if err := dec.Decode(&dataKv); err != nil {
- log.Fatalf("raftexample: could not decode message (%v)", err)
- }
- s.mu.Lock()
- s.kvStore[dataKv.Key] = dataKv.Val
- s.mu.Unlock()
- }
- close(commit.applyDoneC)
- }
- if err, ok := <-errorC; ok {
- log.Fatal(err)
- }
-}
-
-func (s *kvstore) getSnapshot() ([]byte, error) {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return json.Marshal(s.kvStore)
-}
-
-func (s *kvstore) loadSnapshot() (*raftpb.Snapshot, error) {
- snapshot, err := s.snapshotter.Load()
- if err == snap.ErrNoSnapshot {
- return nil, nil
- }
- if err != nil {
- return nil, err
- }
- return snapshot, nil
-}
-
-func (s *kvstore) recoverFromSnapshot(snapshot []byte) error {
- var store map[string]string
- if err := json.Unmarshal(snapshot, &store); err != nil {
- return err
- }
- s.mu.Lock()
- defer s.mu.Unlock()
- s.kvStore = store
- return nil
-}
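
Editorial note (not part of the patch): the store removed above ferries proposals as gob-encoded kv structs; Propose serializes the pair into a string sent on proposeC, and readCommits decodes each committed entry before applying it to the map. A self-contained sketch of that encode/decode round trip:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

// kv mirrors the struct used by the example's kvstore.
type kv struct {
	Key string
	Val string
}

func main() {
	// Encode a proposal the same way kvstore.Propose did.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(kv{Key: "foo", Val: "bar"}); err != nil {
		log.Fatal(err)
	}
	proposal := buf.String()

	// Decode it the way readCommits did for every committed entry.
	var decoded kv
	if err := gob.NewDecoder(bytes.NewBufferString(proposal)).Decode(&decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s\n", decoded.Key, decoded.Val)
}
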
diff --git a/contrib/raftexample/kvstore_test.go b/contrib/raftexample/kvstore_test.go
deleted file mode 100644
index 231f778f2ee..00000000000
--- a/contrib/raftexample/kvstore_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "reflect"
- "testing"
-)
-
-func Test_kvstore_snapshot(t *testing.T) {
- tm := map[string]string{"foo": "bar"}
- s := &kvstore{kvStore: tm}
-
- v, _ := s.Lookup("foo")
- if v != "bar" {
- t.Fatalf("foo has unexpected value, got %s", v)
- }
-
- data, err := s.getSnapshot()
- if err != nil {
- t.Fatal(err)
- }
- s.kvStore = nil
-
- if err := s.recoverFromSnapshot(data); err != nil {
- t.Fatal(err)
- }
- v, _ = s.Lookup("foo")
- if v != "bar" {
- t.Fatalf("foo has unexpected value, got %s", v)
- }
- if !reflect.DeepEqual(s.kvStore, tm) {
- t.Fatalf("store expected %+v, got %+v", tm, s.kvStore)
- }
-}
diff --git a/contrib/raftexample/listener.go b/contrib/raftexample/listener.go
deleted file mode 100644
index d67e16f5dee..00000000000
--- a/contrib/raftexample/listener.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "errors"
- "net"
- "time"
-)
-
-// stoppableListener sets TCP keep-alive timeouts on accepted
-// connections and waits on stopc message
-type stoppableListener struct {
- *net.TCPListener
- stopc <-chan struct{}
-}
-
-func newStoppableListener(addr string, stopc <-chan struct{}) (*stoppableListener, error) {
- ln, err := net.Listen("tcp", addr)
- if err != nil {
- return nil, err
- }
- return &stoppableListener{ln.(*net.TCPListener), stopc}, nil
-}
-
-func (ln stoppableListener) Accept() (c net.Conn, err error) {
- connc := make(chan *net.TCPConn, 1)
- errc := make(chan error, 1)
- go func() {
- tc, err := ln.AcceptTCP()
- if err != nil {
- errc <- err
- return
- }
- connc <- tc
- }()
- select {
- case <-ln.stopc:
- return nil, errors.New("server stopped")
- case err := <-errc:
- return nil, err
- case tc := <-connc:
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
- return tc, nil
- }
-}
diff --git a/contrib/raftexample/main.go b/contrib/raftexample/main.go
deleted file mode 100644
index 73f02787a35..00000000000
--- a/contrib/raftexample/main.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "flag"
- "strings"
-
- "go.etcd.io/raft/v3/raftpb"
-)
-
-func main() {
- cluster := flag.String("cluster", "http://127.0.0.1:9021", "comma separated cluster peers")
- id := flag.Int("id", 1, "node ID")
- kvport := flag.Int("port", 9121, "key-value server port")
- join := flag.Bool("join", false, "join an existing cluster")
- flag.Parse()
-
- proposeC := make(chan string)
- defer close(proposeC)
- confChangeC := make(chan raftpb.ConfChange)
- defer close(confChangeC)
-
- // raft provides a commit stream for the proposals from the http api
- var kvs *kvstore
- getSnapshot := func() ([]byte, error) { return kvs.getSnapshot() }
- commitC, errorC, snapshotterReady := newRaftNode(*id, strings.Split(*cluster, ","), *join, getSnapshot, proposeC, confChangeC)
-
- kvs = newKVStore(<-snapshotterReady, proposeC, commitC, errorC)
-
- // the key-value http handler will propose updates to raft
- serveHTTPKVAPI(kvs, *kvport, confChangeC, errorC)
-}
diff --git a/contrib/raftexample/raft.go b/contrib/raftexample/raft.go
deleted file mode 100644
index 971141ae359..00000000000
--- a/contrib/raftexample/raft.go
+++ /dev/null
@@ -1,522 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "context"
- "fmt"
- "log"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "time"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
- "go.etcd.io/etcd/server/v3/storage/wal"
- "go.etcd.io/etcd/server/v3/storage/wal/walpb"
- "go.etcd.io/raft/v3"
- "go.etcd.io/raft/v3/raftpb"
-
- "go.uber.org/zap"
-)
-
-type commit struct {
- data []string
- applyDoneC chan<- struct{}
-}
-
-// A key-value stream backed by raft
-type raftNode struct {
- proposeC <-chan string // proposed messages (k,v)
- confChangeC <-chan raftpb.ConfChange // proposed cluster config changes
- commitC chan<- *commit // entries committed to log (k,v)
- errorC chan<- error // errors from raft session
-
- id int // client ID for raft session
- peers []string // raft peer URLs
- join bool // node is joining an existing cluster
- waldir string // path to WAL directory
- snapdir string // path to snapshot directory
- getSnapshot func() ([]byte, error)
-
- confState raftpb.ConfState
- snapshotIndex uint64
- appliedIndex uint64
-
- // raft backing for the commit/error channel
- node raft.Node
- raftStorage *raft.MemoryStorage
- wal *wal.WAL
-
- snapshotter *snap.Snapshotter
- snapshotterReady chan *snap.Snapshotter // signals when snapshotter is ready
-
- snapCount uint64
- transport *rafthttp.Transport
- stopc chan struct{} // signals proposal channel closed
- httpstopc chan struct{} // signals http server to shutdown
- httpdonec chan struct{} // signals http server shutdown complete
-
- logger *zap.Logger
-}
-
-var defaultSnapshotCount uint64 = 10000
-
-// newRaftNode initiates a raft instance and returns a committed log entry
-// channel and error channel. Proposals for log updates are sent over the
-// provided proposal channel. All log entries are replayed over the
-// commit channel, followed by a nil message (to indicate the channel is
-// current), then new log entries. To shut down, close proposeC and read errorC.
-func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error), proposeC <-chan string,
- confChangeC <-chan raftpb.ConfChange) (<-chan *commit, <-chan error, <-chan *snap.Snapshotter) {
-
- commitC := make(chan *commit)
- errorC := make(chan error)
-
- rc := &raftNode{
- proposeC: proposeC,
- confChangeC: confChangeC,
- commitC: commitC,
- errorC: errorC,
- id: id,
- peers: peers,
- join: join,
- waldir: fmt.Sprintf("raftexample-%d", id),
- snapdir: fmt.Sprintf("raftexample-%d-snap", id),
- getSnapshot: getSnapshot,
- snapCount: defaultSnapshotCount,
- stopc: make(chan struct{}),
- httpstopc: make(chan struct{}),
- httpdonec: make(chan struct{}),
-
- logger: zap.NewExample(),
-
- snapshotterReady: make(chan *snap.Snapshotter, 1),
- // rest of structure populated after WAL replay
- }
- go rc.startRaft()
- return commitC, errorC, rc.snapshotterReady
-}
-
-func (rc *raftNode) saveSnap(snap raftpb.Snapshot) error {
- walSnap := walpb.Snapshot{
- Index: snap.Metadata.Index,
- Term: snap.Metadata.Term,
- ConfState: &snap.Metadata.ConfState,
- }
- // save the snapshot file before writing the snapshot to the wal.
- // This makes it possible for the snapshot file to become orphaned, but prevents
- // a WAL snapshot entry from having no corresponding snapshot file.
- if err := rc.snapshotter.SaveSnap(snap); err != nil {
- return err
- }
- if err := rc.wal.SaveSnapshot(walSnap); err != nil {
- return err
- }
- return rc.wal.ReleaseLockTo(snap.Metadata.Index)
-}
-
-func (rc *raftNode) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) {
- if len(ents) == 0 {
- return ents
- }
- firstIdx := ents[0].Index
- if firstIdx > rc.appliedIndex+1 {
- log.Fatalf("first index of committed entry[%d] should <= progress.appliedIndex[%d]+1", firstIdx, rc.appliedIndex)
- }
- if rc.appliedIndex-firstIdx+1 < uint64(len(ents)) {
- nents = ents[rc.appliedIndex-firstIdx+1:]
- }
- return nents
-}
-
-// publishEntries writes committed log entries to commit channel and returns
-// whether all entries could be published.
-func (rc *raftNode) publishEntries(ents []raftpb.Entry) (<-chan struct{}, bool) {
- if len(ents) == 0 {
- return nil, true
- }
-
- data := make([]string, 0, len(ents))
- for i := range ents {
- switch ents[i].Type {
- case raftpb.EntryNormal:
- if len(ents[i].Data) == 0 {
- // ignore empty messages
- break
- }
- s := string(ents[i].Data)
- data = append(data, s)
- case raftpb.EntryConfChange:
- var cc raftpb.ConfChange
- cc.Unmarshal(ents[i].Data)
- rc.confState = *rc.node.ApplyConfChange(cc)
- switch cc.Type {
- case raftpb.ConfChangeAddNode:
- if len(cc.Context) > 0 {
- rc.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)})
- }
- case raftpb.ConfChangeRemoveNode:
- if cc.NodeID == uint64(rc.id) {
- log.Println("I've been removed from the cluster! Shutting down.")
- return nil, false
- }
- rc.transport.RemovePeer(types.ID(cc.NodeID))
- }
- }
- }
-
- var applyDoneC chan struct{}
-
- if len(data) > 0 {
- applyDoneC = make(chan struct{}, 1)
- select {
- case rc.commitC <- &commit{data, applyDoneC}:
- case <-rc.stopc:
- return nil, false
- }
- }
-
- // after commit, update appliedIndex
- rc.appliedIndex = ents[len(ents)-1].Index
-
- return applyDoneC, true
-}
-
-func (rc *raftNode) loadSnapshot() *raftpb.Snapshot {
- if wal.Exist(rc.waldir) {
- walSnaps, err := wal.ValidSnapshotEntries(rc.logger, rc.waldir)
- if err != nil {
- log.Fatalf("raftexample: error listing snapshots (%v)", err)
- }
- snapshot, err := rc.snapshotter.LoadNewestAvailable(walSnaps)
- if err != nil && err != snap.ErrNoSnapshot {
- log.Fatalf("raftexample: error loading snapshot (%v)", err)
- }
- return snapshot
- }
- return &raftpb.Snapshot{}
-}
-
-// openWAL returns a WAL ready for reading.
-func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot) *wal.WAL {
- if !wal.Exist(rc.waldir) {
- if err := os.Mkdir(rc.waldir, 0750); err != nil {
- log.Fatalf("raftexample: cannot create dir for wal (%v)", err)
- }
-
- w, err := wal.Create(zap.NewExample(), rc.waldir, nil)
- if err != nil {
- log.Fatalf("raftexample: create wal error (%v)", err)
- }
- w.Close()
- }
-
- walsnap := walpb.Snapshot{}
- if snapshot != nil {
- walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
- }
- log.Printf("loading WAL at term %d and index %d", walsnap.Term, walsnap.Index)
- w, err := wal.Open(zap.NewExample(), rc.waldir, walsnap)
- if err != nil {
- log.Fatalf("raftexample: error loading wal (%v)", err)
- }
-
- return w
-}
-
-// replayWAL replays WAL entries into the raft instance.
-func (rc *raftNode) replayWAL() *wal.WAL {
- log.Printf("replaying WAL of member %d", rc.id)
- snapshot := rc.loadSnapshot()
- w := rc.openWAL(snapshot)
- _, st, ents, err := w.ReadAll()
- if err != nil {
- log.Fatalf("raftexample: failed to read WAL (%v)", err)
- }
- rc.raftStorage = raft.NewMemoryStorage()
- if snapshot != nil {
- rc.raftStorage.ApplySnapshot(*snapshot)
- }
- rc.raftStorage.SetHardState(st)
-
- // append to storage so raft starts at the right place in log
- rc.raftStorage.Append(ents)
-
- return w
-}
-
-func (rc *raftNode) writeError(err error) {
- rc.stopHTTP()
- close(rc.commitC)
- rc.errorC <- err
- close(rc.errorC)
- rc.node.Stop()
-}
-
-func (rc *raftNode) startRaft() {
- if !fileutil.Exist(rc.snapdir) {
- if err := os.Mkdir(rc.snapdir, 0750); err != nil {
- log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
- }
- }
- rc.snapshotter = snap.New(zap.NewExample(), rc.snapdir)
-
- oldwal := wal.Exist(rc.waldir)
- rc.wal = rc.replayWAL()
-
- // signal replay has finished
- rc.snapshotterReady <- rc.snapshotter
-
- rpeers := make([]raft.Peer, len(rc.peers))
- for i := range rpeers {
- rpeers[i] = raft.Peer{ID: uint64(i + 1)}
- }
- c := &raft.Config{
- ID: uint64(rc.id),
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: rc.raftStorage,
- MaxSizePerMsg: 1024 * 1024,
- MaxInflightMsgs: 256,
- MaxUncommittedEntriesSize: 1 << 30,
- }
-
- if oldwal || rc.join {
- rc.node = raft.RestartNode(c)
- } else {
- rc.node = raft.StartNode(c, rpeers)
- }
-
- rc.transport = &rafthttp.Transport{
- Logger: rc.logger,
- ID: types.ID(rc.id),
- ClusterID: 0x1000,
- Raft: rc,
- ServerStats: stats.NewServerStats("", ""),
- LeaderStats: stats.NewLeaderStats(zap.NewExample(), strconv.Itoa(rc.id)),
- ErrorC: make(chan error),
- }
-
- rc.transport.Start()
- for i := range rc.peers {
- if i+1 != rc.id {
- rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
- }
- }
-
- go rc.serveRaft()
- go rc.serveChannels()
-}
-
-// stop closes http, closes all channels, and stops raft.
-func (rc *raftNode) stop() {
- rc.stopHTTP()
- close(rc.commitC)
- close(rc.errorC)
- rc.node.Stop()
-}
-
-func (rc *raftNode) stopHTTP() {
- rc.transport.Stop()
- close(rc.httpstopc)
- <-rc.httpdonec
-}
-
-func (rc *raftNode) publishSnapshot(snapshotToSave raftpb.Snapshot) {
- if raft.IsEmptySnap(snapshotToSave) {
- return
- }
-
- log.Printf("publishing snapshot at index %d", rc.snapshotIndex)
- defer log.Printf("finished publishing snapshot at index %d", rc.snapshotIndex)
-
- if snapshotToSave.Metadata.Index <= rc.appliedIndex {
- log.Fatalf("snapshot index [%d] should > progress.appliedIndex [%d]", snapshotToSave.Metadata.Index, rc.appliedIndex)
- }
- rc.commitC <- nil // trigger kvstore to load snapshot
-
- rc.confState = snapshotToSave.Metadata.ConfState
- rc.snapshotIndex = snapshotToSave.Metadata.Index
- rc.appliedIndex = snapshotToSave.Metadata.Index
-}
-
-var snapshotCatchUpEntriesN uint64 = 10000
-
-func (rc *raftNode) maybeTriggerSnapshot(applyDoneC <-chan struct{}) {
- if rc.appliedIndex-rc.snapshotIndex <= rc.snapCount {
- return
- }
-
- // wait until all committed entries are applied (or server is closed)
- if applyDoneC != nil {
- select {
- case <-applyDoneC:
- case <-rc.stopc:
- return
- }
- }
-
- log.Printf("start snapshot [applied index: %d | last snapshot index: %d]", rc.appliedIndex, rc.snapshotIndex)
- data, err := rc.getSnapshot()
- if err != nil {
- log.Panic(err)
- }
- snap, err := rc.raftStorage.CreateSnapshot(rc.appliedIndex, &rc.confState, data)
- if err != nil {
- panic(err)
- }
- if err := rc.saveSnap(snap); err != nil {
- panic(err)
- }
-
- compactIndex := uint64(1)
- if rc.appliedIndex > snapshotCatchUpEntriesN {
- compactIndex = rc.appliedIndex - snapshotCatchUpEntriesN
- }
- if err := rc.raftStorage.Compact(compactIndex); err != nil {
- panic(err)
- }
-
- log.Printf("compacted log at index %d", compactIndex)
- rc.snapshotIndex = rc.appliedIndex
-}
-
-func (rc *raftNode) serveChannels() {
- snap, err := rc.raftStorage.Snapshot()
- if err != nil {
- panic(err)
- }
- rc.confState = snap.Metadata.ConfState
- rc.snapshotIndex = snap.Metadata.Index
- rc.appliedIndex = snap.Metadata.Index
-
- defer rc.wal.Close()
-
- ticker := time.NewTicker(100 * time.Millisecond)
- defer ticker.Stop()
-
- // send proposals over raft
- go func() {
- confChangeCount := uint64(0)
-
- for rc.proposeC != nil && rc.confChangeC != nil {
- select {
- case prop, ok := <-rc.proposeC:
- if !ok {
- rc.proposeC = nil
- } else {
- // blocks until accepted by raft state machine
- rc.node.Propose(context.TODO(), []byte(prop))
- }
-
- case cc, ok := <-rc.confChangeC:
- if !ok {
- rc.confChangeC = nil
- } else {
- confChangeCount++
- cc.ID = confChangeCount
- rc.node.ProposeConfChange(context.TODO(), cc)
- }
- }
- }
- // client closed channel; shutdown raft if not already
- close(rc.stopc)
- }()
-
- // event loop on raft state machine updates
- for {
- select {
- case <-ticker.C:
- rc.node.Tick()
-
- // store raft entries to wal, then publish over commit channel
- case rd := <-rc.node.Ready():
- // Must save the snapshot file and WAL snapshot entry before saving any other entries
- // or hardstate to ensure that recovery after a snapshot restore is possible.
- if !raft.IsEmptySnap(rd.Snapshot) {
- rc.saveSnap(rd.Snapshot)
- }
- rc.wal.Save(rd.HardState, rd.Entries)
- if !raft.IsEmptySnap(rd.Snapshot) {
- rc.raftStorage.ApplySnapshot(rd.Snapshot)
- rc.publishSnapshot(rd.Snapshot)
- }
- rc.raftStorage.Append(rd.Entries)
- rc.transport.Send(rc.processMessages(rd.Messages))
- applyDoneC, ok := rc.publishEntries(rc.entriesToApply(rd.CommittedEntries))
- if !ok {
- rc.stop()
- return
- }
- rc.maybeTriggerSnapshot(applyDoneC)
- rc.node.Advance()
-
- case err := <-rc.transport.ErrorC:
- rc.writeError(err)
- return
-
- case <-rc.stopc:
- rc.stop()
- return
- }
- }
-}
-
-// When a `raftpb.EntryConfChange` is applied after the snapshot was created,
-// the confState included in the snapshot is out of date, so we need
-// to update the confState before sending the snapshot to a follower.
-func (rc *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
- for i := 0; i < len(ms); i++ {
- if ms[i].Type == raftpb.MsgSnap {
- ms[i].Snapshot.Metadata.ConfState = rc.confState
- }
- }
- return ms
-}
-
-func (rc *raftNode) serveRaft() {
- url, err := url.Parse(rc.peers[rc.id-1])
- if err != nil {
- log.Fatalf("raftexample: Failed parsing URL (%v)", err)
- }
-
- ln, err := newStoppableListener(url.Host, rc.httpstopc)
- if err != nil {
- log.Fatalf("raftexample: Failed to listen rafthttp (%v)", err)
- }
-
- err = (&http.Server{Handler: rc.transport.Handler()}).Serve(ln)
- select {
- case <-rc.httpstopc:
- default:
- log.Fatalf("raftexample: Failed to serve rafthttp (%v)", err)
- }
- close(rc.httpdonec)
-}
-
-func (rc *raftNode) Process(ctx context.Context, m raftpb.Message) error {
- return rc.node.Step(ctx, m)
-}
-func (rc *raftNode) IsIDRemoved(id uint64) bool { return false }
-func (rc *raftNode) ReportUnreachable(id uint64) { rc.node.ReportUnreachable(id) }
-func (rc *raftNode) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
- rc.node.ReportSnapshot(id, status)
-}
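
Editorial note (not part of the patch): with the defaults above (defaultSnapshotCount and snapshotCatchUpEntriesN both 10000), maybeTriggerSnapshot stays a no-op until appliedIndex exceeds snapshotIndex by more than 10000. For example, if snapshotIndex is 14000 and appliedIndex reaches 25000, a snapshot is taken at index 25000 and the in-memory log is compacted up to 25000 - 10000 = 15000, so the most recent 10000 entries remain available for slow followers to catch up without a full snapshot transfer.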
diff --git a/contrib/raftexample/raft_test.go b/contrib/raftexample/raft_test.go
deleted file mode 100644
index 5a0385be226..00000000000
--- a/contrib/raftexample/raft_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2022 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "reflect"
- "testing"
-
- "go.etcd.io/raft/v3/raftpb"
-)
-
-func TestProcessMessages(t *testing.T) {
- cases := []struct {
- name string
- confState raftpb.ConfState
- InputMessages []raftpb.Message
- ExpectedMessages []raftpb.Message
- }{
- {
- name: "only one snapshot message",
- confState: raftpb.ConfState{
- Voters: []uint64{2, 6, 8, 10},
- },
- InputMessages: []raftpb.Message{
- {
- Type: raftpb.MsgSnap,
- To: 8,
- Snapshot: &raftpb.Snapshot{
- Metadata: raftpb.SnapshotMetadata{
- Index: 100,
- Term: 3,
- ConfState: raftpb.ConfState{
- Voters: []uint64{2, 6, 8},
- AutoLeave: true,
- },
- },
- },
- },
- },
- ExpectedMessages: []raftpb.Message{
- {
- Type: raftpb.MsgSnap,
- To: 8,
- Snapshot: &raftpb.Snapshot{
- Metadata: raftpb.SnapshotMetadata{
- Index: 100,
- Term: 3,
- ConfState: raftpb.ConfState{
- Voters: []uint64{2, 6, 8, 10},
- },
- },
- },
- },
- },
- },
- {
- name: "one snapshot message and one other message",
- confState: raftpb.ConfState{
- Voters: []uint64{2, 7, 8, 12},
- },
- InputMessages: []raftpb.Message{
- {
- Type: raftpb.MsgSnap,
- To: 8,
- Snapshot: &raftpb.Snapshot{
- Metadata: raftpb.SnapshotMetadata{
- Index: 100,
- Term: 3,
- ConfState: raftpb.ConfState{
- Voters: []uint64{2, 6, 8},
- AutoLeave: true,
- },
- },
- },
- },
- {
- Type: raftpb.MsgApp,
- From: 6,
- To: 8,
- },
- },
- ExpectedMessages: []raftpb.Message{
- {
- Type: raftpb.MsgSnap,
- To: 8,
- Snapshot: &raftpb.Snapshot{
- Metadata: raftpb.SnapshotMetadata{
- Index: 100,
- Term: 3,
- ConfState: raftpb.ConfState{
- Voters: []uint64{2, 7, 8, 12},
- },
- },
- },
- },
- {
- Type: raftpb.MsgApp,
- From: 6,
- To: 8,
- },
- },
- },
- }
-
- for _, tc := range cases {
- t.Run(tc.name, func(t *testing.T) {
- rn := &raftNode{
- confState: tc.confState,
- }
-
- outputMessages := rn.processMessages(tc.InputMessages)
-
- if !reflect.DeepEqual(outputMessages, tc.ExpectedMessages) {
- t.Fatalf("Unexpected messages, expected: %v, got %v", tc.ExpectedMessages, outputMessages)
- }
- })
- }
-}
diff --git a/contrib/raftexample/raftexample_test.go b/contrib/raftexample/raftexample_test.go
deleted file mode 100644
index f7aa335eb04..00000000000
--- a/contrib/raftexample/raftexample_test.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "net/http/httptest"
- "os"
- "sync"
- "testing"
- "time"
-
- "go.etcd.io/raft/v3/raftpb"
-)
-
-func getSnapshotFn() (func() ([]byte, error), <-chan struct{}) {
- snapshotTriggeredC := make(chan struct{})
- return func() ([]byte, error) {
- snapshotTriggeredC <- struct{}{}
- return nil, nil
- }, snapshotTriggeredC
-}
-
-type cluster struct {
- peers []string
- commitC []<-chan *commit
- errorC []<-chan error
- proposeC []chan string
- confChangeC []chan raftpb.ConfChange
- snapshotTriggeredC []<-chan struct{}
-}
-
-// newCluster creates a cluster of n nodes
-func newCluster(n int) *cluster {
- peers := make([]string, n)
- for i := range peers {
- peers[i] = fmt.Sprintf("http://127.0.0.1:%d", 10000+i)
- }
-
- clus := &cluster{
- peers: peers,
- commitC: make([]<-chan *commit, len(peers)),
- errorC: make([]<-chan error, len(peers)),
- proposeC: make([]chan string, len(peers)),
- confChangeC: make([]chan raftpb.ConfChange, len(peers)),
- snapshotTriggeredC: make([]<-chan struct{}, len(peers)),
- }
-
- for i := range clus.peers {
- os.RemoveAll(fmt.Sprintf("raftexample-%d", i+1))
- os.RemoveAll(fmt.Sprintf("raftexample-%d-snap", i+1))
- clus.proposeC[i] = make(chan string, 1)
- clus.confChangeC[i] = make(chan raftpb.ConfChange, 1)
- fn, snapshotTriggeredC := getSnapshotFn()
- clus.snapshotTriggeredC[i] = snapshotTriggeredC
- clus.commitC[i], clus.errorC[i], _ = newRaftNode(i+1, clus.peers, false, fn, clus.proposeC[i], clus.confChangeC[i])
- }
-
- return clus
-}
-
-// Close closes all cluster nodes and returns an error if any failed.
-func (clus *cluster) Close() (err error) {
- for i := range clus.peers {
- go func(i int) {
- for range clus.commitC[i] {
- // drain pending commits
- }
- }(i)
- close(clus.proposeC[i])
- // wait for channel to close
- if erri := <-clus.errorC[i]; erri != nil {
- err = erri
- }
- // clean intermediates
- os.RemoveAll(fmt.Sprintf("raftexample-%d", i+1))
- os.RemoveAll(fmt.Sprintf("raftexample-%d-snap", i+1))
- }
- return err
-}
-
-func (clus *cluster) closeNoErrors(t *testing.T) {
- t.Log("closing cluster...")
- if err := clus.Close(); err != nil {
- t.Fatal(err)
- }
- t.Log("closing cluster [done]")
-}
-
-// TestProposeOnCommit starts three nodes and feeds commits back into the proposal
-// channel. The intent is to ensure blocking on a proposal won't block raft progress.
-func TestProposeOnCommit(t *testing.T) {
- clus := newCluster(3)
- defer clus.closeNoErrors(t)
-
- donec := make(chan struct{})
- for i := range clus.peers {
- // feedback for "n" committed entries, then update donec
- go func(pC chan<- string, cC <-chan *commit, eC <-chan error) {
- for n := 0; n < 100; n++ {
- c, ok := <-cC
- if !ok {
- pC = nil
- }
- select {
- case pC <- c.data[0]:
- continue
- case err := <-eC:
- t.Errorf("eC message (%v)", err)
- }
- }
- donec <- struct{}{}
- for range cC {
- // acknowledge the commits from other nodes so
- // raft continues to make progress
- }
- }(clus.proposeC[i], clus.commitC[i], clus.errorC[i])
-
- // one message feedback per node
- go func(i int) { clus.proposeC[i] <- "foo" }(i)
- }
-
- for range clus.peers {
- <-donec
- }
-}
-
-// TestCloseProposerBeforeReplay tests closing the proposer before raft starts.
-func TestCloseProposerBeforeReplay(t *testing.T) {
- clus := newCluster(1)
- // close before replay so raft never starts
- defer clus.closeNoErrors(t)
-}
-
-// TestCloseProposerInflight tests closing the proposer while
-// committed messages are being published to the client.
-func TestCloseProposerInflight(t *testing.T) {
- clus := newCluster(1)
- defer clus.closeNoErrors(t)
-
- var wg sync.WaitGroup
- wg.Add(1)
-
- // some inflight ops
- go func() {
- defer wg.Done()
- clus.proposeC[0] <- "foo"
- clus.proposeC[0] <- "bar"
- }()
-
- // wait for one message
- if c, ok := <-clus.commitC[0]; !ok || c.data[0] != "foo" {
- t.Fatalf("Commit failed")
- }
-
- wg.Wait()
-}
-
-func TestPutAndGetKeyValue(t *testing.T) {
- clusters := []string{"http://127.0.0.1:9021"}
-
- proposeC := make(chan string)
- defer close(proposeC)
-
- confChangeC := make(chan raftpb.ConfChange)
- defer close(confChangeC)
-
- var kvs *kvstore
- getSnapshot := func() ([]byte, error) { return kvs.getSnapshot() }
- commitC, errorC, snapshotterReady := newRaftNode(1, clusters, false, getSnapshot, proposeC, confChangeC)
-
- kvs = newKVStore(<-snapshotterReady, proposeC, commitC, errorC)
-
- srv := httptest.NewServer(&httpKVAPI{
- store: kvs,
- confChangeC: confChangeC,
- })
- defer srv.Close()
-
- // wait for the server to start
- <-time.After(time.Second * 3)
-
- wantKey, wantValue := "test-key", "test-value"
- url := fmt.Sprintf("%s/%s", srv.URL, wantKey)
- body := bytes.NewBufferString(wantValue)
- cli := srv.Client()
-
- req, err := http.NewRequest("PUT", url, body)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Content-Type", "text/html; charset=utf-8")
- _, err = cli.Do(req)
- if err != nil {
- t.Fatal(err)
- }
-
- // wait a moment for the message to be processed, otherwise the GET would fail.
- <-time.After(time.Second)
-
- resp, err := cli.Get(url)
- if err != nil {
- t.Fatal(err)
- }
-
- data, err := io.ReadAll(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
-
- if gotValue := string(data); wantValue != gotValue {
- t.Fatalf("expect %s, got %s", wantValue, gotValue)
- }
-}
-
-// TestAddNewNode tests adding a new node to an existing cluster.
-func TestAddNewNode(t *testing.T) {
- clus := newCluster(3)
- defer clus.closeNoErrors(t)
-
- os.RemoveAll("raftexample-4")
- os.RemoveAll("raftexample-4-snap")
- defer func() {
- os.RemoveAll("raftexample-4")
- os.RemoveAll("raftexample-4-snap")
- }()
-
- newNodeURL := "http://127.0.0.1:10004"
- clus.confChangeC[0] <- raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: 4,
- Context: []byte(newNodeURL),
- }
-
- proposeC := make(chan string)
- defer close(proposeC)
-
- confChangeC := make(chan raftpb.ConfChange)
- defer close(confChangeC)
-
- newRaftNode(4, append(clus.peers, newNodeURL), true, nil, proposeC, confChangeC)
-
- go func() {
- proposeC <- "foo"
- }()
-
- if c, ok := <-clus.commitC[0]; !ok || c.data[0] != "foo" {
- t.Fatalf("Commit failed")
- }
-}
-
-func TestSnapshot(t *testing.T) {
- prevDefaultSnapshotCount := defaultSnapshotCount
- prevSnapshotCatchUpEntriesN := snapshotCatchUpEntriesN
- defaultSnapshotCount = 4
- snapshotCatchUpEntriesN = 4
- defer func() {
- defaultSnapshotCount = prevDefaultSnapshotCount
- snapshotCatchUpEntriesN = prevSnapshotCatchUpEntriesN
- }()
-
- clus := newCluster(3)
- defer clus.closeNoErrors(t)
-
- go func() {
- clus.proposeC[0] <- "foo"
- }()
-
- c := <-clus.commitC[0]
-
- select {
- case <-clus.snapshotTriggeredC[0]:
- t.Fatalf("snapshot triggered before applying done")
- default:
- }
- close(c.applyDoneC)
- <-clus.snapshotTriggeredC[0]
-}
diff --git a/contrib/systemd/etcd.service b/contrib/systemd/etcd.service
deleted file mode 100644
index 8fc0570c6dd..00000000000
--- a/contrib/systemd/etcd.service
+++ /dev/null
@@ -1,18 +0,0 @@
-[Unit]
-Description=etcd key-value store
-Documentation=https://github.com/etcd-io/etcd
-After=network-online.target local-fs.target remote-fs.target time-sync.target
-Wants=network-online.target local-fs.target remote-fs.target time-sync.target
-
-[Service]
-User=etcd
-Type=notify
-Environment=ETCD_DATA_DIR=/var/lib/etcd
-Environment=ETCD_NAME=%m
-ExecStart=/usr/bin/etcd
-Restart=always
-RestartSec=10s
-LimitNOFILE=40000
-
-[Install]
-WantedBy=multi-user.target
diff --git a/contrib/systemd/etcd3-multinode/README.md b/contrib/systemd/etcd3-multinode/README.md
deleted file mode 100644
index cab9ab4d4cb..00000000000
--- a/contrib/systemd/etcd3-multinode/README.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# etcd3 multi-node cluster
-
-Here's how to deploy an etcd cluster with systemd.
-
-## Set up data directory
-
-etcd needs a data directory on the host machine. Make the data directory accessible to systemd:
-
-```
-sudo mkdir -p /var/lib/etcd
-sudo chown -R root:$(whoami) /var/lib/etcd
-sudo chmod -R a+rw /var/lib/etcd
-```
-
-## Write systemd service file
-
-On each machine, write an etcd systemd service file:
-
-```
-cat > /tmp/my-etcd-1.service < /tmp/my-etcd-2.service < /tmp/my-etcd-3.service <]
+ storageType: S3
+ backupPolicy:
+ # a value > 0 enables periodic backup
+ backupIntervalInSecond: 125
+ maxBackups: 4
+ s3:
+ # The format of "path" must be: "/"
+ # e.g: "mybucket/etcd.backup"
+ path:
+ awsSecret:
\ No newline at end of file
diff --git a/etcd-io-chaos.yaml b/etcd-io-chaos.yaml
new file mode 100644
index 00000000000..bc332c0466d
--- /dev/null
+++ b/etcd-io-chaos.yaml
@@ -0,0 +1,17 @@
+apiVersion: chaos-mesh.org/v1alpha1
+kind: IoChaos
+metadata:
+ name: io-delay-example
+spec:
+ action: latency
+ mode: one
+ selector:
+ labelSelectors:
+ app: etcd
+ volumePath: /var/run/etcd
+ path: '/var/run/etcd/**/*'
+ delay: '100ms'
+ percent: 50
+ duration: '400s'
+ scheduler:
+ cron: '@every 10m'
\ No newline at end of file
diff --git a/etcd-monitor.yaml b/etcd-monitor.yaml
new file mode 100644
index 00000000000..8ae25bf3697
--- /dev/null
+++ b/etcd-monitor.yaml
@@ -0,0 +1,20 @@
+apiVersion: etcd.cloud.tencent.com/v1beta1
+kind: EtcdMonitor
+metadata:
+ labels:
+ clusterName: gz-qcloud-etcd-03
+ region: gz
+ name: gz-qcloud-etcd-03-etcd-node-key-diff
+ namespace: gz
+spec:
+ clusterId: gz-qcloud-etcd-03
+ metricName: etcd-node-key-diff
+ metricProviderName: cruiser
+ name: gz-qcloud-etcd-03
+ productName: tke
+ region: gz
+ status:
+ records:
+ - endTime: "2021-02-25T11:22:26Z"
+ message: collectEtcdNodeKeyDiff,etcd cluster gz-qcloud-etcd-03,total key num is
+ 122143,nodeKeyDiff is 0
diff --git a/etcd.conf.yml.sample b/etcd.conf.yml.sample
deleted file mode 100644
index 38d74bcb793..00000000000
--- a/etcd.conf.yml.sample
+++ /dev/null
@@ -1,140 +0,0 @@
-# This is the configuration file for the etcd server.
-
-# Human-readable name for this member.
-name: 'default'
-
-# Path to the data directory.
-data-dir:
-
-# Path to the dedicated wal directory.
-wal-dir:
-
-# Number of committed transactions to trigger a snapshot to disk.
-snapshot-count: 10000
-
-# Time (in milliseconds) of a heartbeat interval.
-heartbeat-interval: 100
-
-# Time (in milliseconds) for an election to timeout.
-election-timeout: 1000
-
-# Raise alarms when backend size exceeds the given quota. 0 means use the
-# default quota.
-quota-backend-bytes: 0
-
-# List of comma separated URLs to listen on for peer traffic.
-listen-peer-urls: http://localhost:2380
-
-# List of comma separated URLs to listen on for client traffic.
-listen-client-urls: http://localhost:2379
-
-# Maximum number of snapshot files to retain (0 is unlimited).
-max-snapshots: 5
-
-# Maximum number of wal files to retain (0 is unlimited).
-max-wals: 5
-
-# Comma-separated white list of origins for CORS (cross-origin resource sharing).
-cors:
-
-# List of this member's peer URLs to advertise to the rest of the cluster.
-# The URLs need to be a comma-separated list.
-initial-advertise-peer-urls: http://localhost:2380
-
-# List of this member's client URLs to advertise to the public.
-# The URLs need to be a comma-separated list.
-advertise-client-urls: http://localhost:2379
-
-# Discovery URL used to bootstrap the cluster.
-discovery:
-
-# Valid values include 'exit', 'proxy'
-discovery-fallback: 'proxy'
-
-# HTTP proxy to use for traffic to discovery service.
-discovery-proxy:
-
-# DNS domain used to bootstrap initial cluster.
-discovery-srv:
-
-# Initial cluster configuration for bootstrapping.
-initial-cluster:
-
-# Initial cluster token for the etcd cluster during bootstrap.
-initial-cluster-token: 'etcd-cluster'
-
-# Initial cluster state ('new' or 'existing').
-initial-cluster-state: 'new'
-
-# Reject reconfiguration requests that would cause quorum loss.
-strict-reconfig-check: false
-
-# Enable runtime profiling data via HTTP server
-enable-pprof: true
-
-# Valid values include 'on', 'readonly', 'off'
-proxy: 'off'
-
-# Time (in milliseconds) an endpoint will be held in a failed state.
-proxy-failure-wait: 5000
-
-# Time (in milliseconds) of the endpoints refresh interval.
-proxy-refresh-interval: 30000
-
-# Time (in milliseconds) for a dial to timeout.
-proxy-dial-timeout: 1000
-
-# Time (in milliseconds) for a write to timeout.
-proxy-write-timeout: 5000
-
-# Time (in milliseconds) for a read to timeout.
-proxy-read-timeout: 0
-
-client-transport-security:
- # Path to the client server TLS cert file.
- cert-file:
-
- # Path to the client server TLS key file.
- key-file:
-
- # Enable client cert authentication.
- client-cert-auth: false
-
- # Path to the client server TLS trusted CA cert file.
- trusted-ca-file:
-
- # Client TLS using generated certificates
- auto-tls: false
-
-peer-transport-security:
- # Path to the peer server TLS cert file.
- cert-file:
-
- # Path to the peer server TLS key file.
- key-file:
-
- # Enable peer client cert authentication.
- client-cert-auth: false
-
- # Path to the peer server TLS trusted CA cert file.
- trusted-ca-file:
-
- # Peer TLS using generated certificates.
- auto-tls: false
-
-# The validity period of the self-signed certificate, in years.
-self-signed-cert-validity: 1
-
-# Enable debug-level logging for etcd.
-log-level: debug
-
-logger: zap
-
-# Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
-log-outputs: [stderr]
-
-# Force to create a new one member cluster.
-force-new-cluster: false
-
-auto-compaction-mode: periodic
-auto-compaction-retention: "1"
diff --git a/server/auth/doc.go b/etcd/auth/doc.go
similarity index 100%
rename from server/auth/doc.go
rename to etcd/auth/doc.go
diff --git a/etcd/auth/jwt_token.go b/etcd/auth/jwt_token.go
new file mode 100644
index 00000000000..e0dcc88e7a4
--- /dev/null
+++ b/etcd/auth/jwt_token.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "time"
+
+ jwt "github.com/form3tech-oss/jwt-go"
+ "go.uber.org/zap"
+)
+
+type tokenJWT struct {
+ lg *zap.Logger
+ signMethod jwt.SigningMethod
+ key interface{}
+ ttl time.Duration
+ verifyOnly bool
+}
+
+func (t *tokenJWT) enable() {}
+func (t *tokenJWT) disable() {}
+func (t *tokenJWT) invalidateUser(string) {}
+func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
+
+// info extracts the user information from the token carried in ctx
+func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+ // rev isn't used in JWT, it is only used in simple token
+ var (
+ username string
+ revision uint64
+ )
+
+ parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
+ if token.Method.Alg() != t.signMethod.Alg() {
+ return nil, errors.New("invalid signing method")
+ }
+ switch k := t.key.(type) {
+ case *rsa.PrivateKey:
+ return &k.PublicKey, nil
+ case *ecdsa.PrivateKey:
+ return &k.PublicKey, nil
+ default:
+ return t.key, nil
+ }
+ })
+ if err != nil {
+ t.lg.Warn(
+ "failed to parse a JWT token",
+ zap.String("token", token),
+ zap.Error(err),
+ )
+ return nil, false
+ }
+
+ claims, ok := parsed.Claims.(jwt.MapClaims)
+ if !parsed.Valid || !ok {
+ t.lg.Warn("invalid JWT token", zap.String("token", token))
+ return nil, false
+ }
+
+ username = claims["username"].(string)
+ revision = uint64(claims["revision"].(float64))
+
+ return &AuthInfo{Username: username, Revision: revision}, true
+}
+
+func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
+ if t.verifyOnly {
+ return "", ErrVerifyOnly
+ }
+
+ // Future work: letting a JWT token include permission information would be
+ // useful for permission checking on the proxy side.
+ tk := jwt.NewWithClaims(t.signMethod,
+ jwt.MapClaims{
+ "username": username,
+ "revision": revision,
+ "exp": time.Now().Add(t.ttl).Unix(),
+ })
+
+ token, err := tk.SignedString(t.key)
+ if err != nil {
+ t.lg.Debug(
+ "failed to sign a JWT token",
+ zap.String("user-name", username),
+ zap.Uint64("revision", revision),
+ zap.Error(err),
+ )
+ return "", err
+ }
+
+ t.lg.Debug(
+ "created/assigned a new JWT token",
+ zap.String("user-name", username),
+ zap.Uint64("revision", revision),
+ zap.String("token", token),
+ )
+ return token, err
+}
+
+func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ var err error
+ var opts jwtOptions
+ err = opts.ParseWithDefaults(optMap)
+ if err != nil {
+ lg.Error("problem loading JWT options", zap.Error(err))
+ return nil, ErrInvalidAuthOpts
+ }
+
+ keys := make([]string, 0, len(optMap))
+ for k := range optMap {
+ if !knownOptions[k] {
+ keys = append(keys, k)
+ }
+ }
+ if len(keys) > 0 {
+ lg.Warn("unknown JWT options", zap.Strings("keys", keys))
+ }
+
+ key, err := opts.Key()
+ if err != nil {
+ return nil, err
+ }
+
+ t := &tokenJWT{
+ lg: lg,
+ ttl: opts.TTL,
+ signMethod: opts.SignMethod,
+ key: key,
+ }
+
+ switch t.signMethod.(type) {
+ case *jwt.SigningMethodECDSA:
+ if _, ok := t.key.(*ecdsa.PublicKey); ok {
+ t.verifyOnly = true
+ }
+ case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
+ if _, ok := t.key.(*rsa.PublicKey); ok {
+ t.verifyOnly = true
+ }
+ }
+
+ return t, nil
+}
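
Editorial note (not part of the patch): a rough sketch of how the provider above gets wired up, written as if it lived in a test file inside this auth package. The option keys ("sign-method", "priv-key", "ttl") come from options.go later in this diff; the secret file path and its contents are made up. HS256 needs only a shared secret, so assign and info can round-trip a token with the same key.

package auth

import (
	"context"
	"os"
	"testing"

	"go.uber.org/zap"
)

func TestJWTProviderSketch(t *testing.T) {
	// Hypothetical shared secret for HS256; options.go reads it from the
	// file named by the "priv-key" option.
	secretFile := "/tmp/jwt-secret"
	if err := os.WriteFile(secretFile, []byte("a-very-secret-key"), 0o600); err != nil {
		t.Fatal(err)
	}

	tp, err := newTokenProviderJWT(zap.NewExample(), map[string]string{
		"sign-method": "HS256",
		"priv-key":    secretFile,
		"ttl":         "30s",
	})
	if err != nil {
		t.Fatal(err)
	}

	// assign issues a token bound to (username, auth revision); info verifies
	// it and recovers the same pair.
	token, err := tp.assign(context.Background(), "root", 1)
	if err != nil {
		t.Fatal(err)
	}
	ai, ok := tp.info(context.Background(), token, 1)
	if !ok || ai.Username != "root" || ai.Revision != 1 {
		t.Fatalf("unexpected auth info: %+v", ai)
	}
}
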
diff --git a/server/auth/nop.go b/etcd/auth/nop.go
similarity index 99%
rename from server/auth/nop.go
rename to etcd/auth/nop.go
index d4378747bd8..8ba3f8c893c 100644
--- a/server/auth/nop.go
+++ b/etcd/auth/nop.go
@@ -27,9 +27,11 @@ func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil }
func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
return nil, false
}
+
func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) {
return "", ErrAuthFailed
}
+
func newTokenProviderNop() (*tokenNop, error) {
return &tokenNop{}, nil
}
diff --git a/etcd/auth/options.go b/etcd/auth/options.go
new file mode 100644
index 00000000000..633df6635b5
--- /dev/null
+++ b/etcd/auth/options.go
@@ -0,0 +1,191 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ jwt "github.com/form3tech-oss/jwt-go"
+)
+
+const (
+ optSignMethod = "sign-method"
+ optPublicKey = "pub-key"
+ optPrivateKey = "priv-key"
+ optTTL = "ttl"
+)
+
+var knownOptions = map[string]bool{
+ optSignMethod: true,
+ optPublicKey: true,
+ optPrivateKey: true,
+ optTTL: true,
+}
+
+// DefaultTTL will be used when a 'ttl' is not specified
+var DefaultTTL = 5 * time.Minute
+
+type jwtOptions struct {
+ SignMethod jwt.SigningMethod // signing method recorded in the JWT header
+ PublicKey []byte
+ PrivateKey []byte
+ TTL time.Duration
+}
+
+// ParseWithDefaults will load options from the specified map or set defaults where appropriate
+func (opts *jwtOptions) ParseWithDefaults(optMap map[string]string) error {
+ if opts.TTL == 0 && optMap[optTTL] == "" {
+ opts.TTL = DefaultTTL
+ }
+
+ return opts.Parse(optMap)
+}
+
+// Parse will load options from the specified map
+func (opts *jwtOptions) Parse(optMap map[string]string) error {
+ var err error
+ if ttl := optMap[optTTL]; ttl != "" {
+ opts.TTL, err = time.ParseDuration(ttl)
+ if err != nil {
+ return err
+ }
+ }
+
+ if file := optMap[optPublicKey]; file != "" {
+ opts.PublicKey, err = ioutil.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ }
+
+ if file := optMap[optPrivateKey]; file != "" {
+ opts.PrivateKey, err = ioutil.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ }
+
+ // signing method is a required field
+ method := optMap[optSignMethod]
+ opts.SignMethod = jwt.GetSigningMethod(method)
+ if opts.SignMethod == nil {
+ return ErrInvalidAuthMethod
+ }
+
+ return nil
+}
+
+// Key will parse and return the appropriately typed key for the selected signature method
+// --auth-token jwt,ttl=30s,sign-method=HS256
+func (opts *jwtOptions) Key() (interface{}, error) {
+ switch opts.SignMethod.(type) {
+ case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
+ return opts.rsaKey()
+ case *jwt.SigningMethodECDSA: // ES256, ES384, ES512: at least one of the public or private key is required
+ return opts.ecKey()
+ case *jwt.SigningMethodHMAC: // HS256, HS384, HS512: a private (shared) key is required
+ return opts.hmacKey()
+ default:
+ return nil, fmt.Errorf("unsupported signing method: %T", opts.SignMethod)
+ }
+}
+
+func (opts *jwtOptions) hmacKey() (interface{}, error) {
+ if len(opts.PrivateKey) == 0 {
+ return nil, ErrMissingKey
+ }
+ return opts.PrivateKey, nil
+}
+
+func (opts *jwtOptions) rsaKey() (interface{}, error) {
+ var (
+ priv *rsa.PrivateKey
+ pub *rsa.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ priv, err = jwt.ParseRSAPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(opts.PublicKey) > 0 {
+ pub, err = jwt.ParseRSAPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && pub.E != priv.E && pub.N.Cmp(priv.N) != 0 {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
+
+func (opts *jwtOptions) ecKey() (interface{}, error) {
+ var (
+ priv *ecdsa.PrivateKey
+ pub *ecdsa.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ priv, err = jwt.ParseECPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(opts.PublicKey) > 0 {
+ pub, err = jwt.ParseECPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && pub.Curve != priv.Curve &&
+ pub.X.Cmp(priv.X) != 0 && pub.Y.Cmp(priv.Y) != 0 {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
diff --git a/etcd/auth/range_perm_cache.go b/etcd/auth/range_perm_cache.go
new file mode 100644
index 00000000000..33d24d69162
--- /dev/null
+++ b/etcd/auth/range_perm_cache.go
@@ -0,0 +1,143 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/authpb"
+ "github.com/ls-2018/etcd_cn/pkg/adt"
+
+ "go.uber.org/zap"
+)
+
+func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions {
+ user := getUser(lg, tx, userName)
+ if user == nil {
+ return nil
+ }
+
+ readPerms := adt.NewIntervalTree()
+ writePerms := adt.NewIntervalTree()
+
+ for _, roleName := range user.Roles {
+ role := getRole(lg, tx, roleName)
+ if role == nil {
+ continue
+ }
+
+ for _, perm := range role.KeyPermission {
+ var ivl adt.Interval
+ var rangeEnd []byte
+
+ if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
+ rangeEnd = []byte(perm.RangeEnd)
+ }
+
+ if len(perm.RangeEnd) != 0 {
+ ivl = adt.NewBytesAffineInterval([]byte(perm.Key), rangeEnd)
+ } else {
+ ivl = adt.NewBytesAffinePoint([]byte(perm.Key))
+ }
+
+ switch perm.PermType {
+ case authpb.READWRITE:
+ readPerms.Insert(ivl, struct{}{})
+ writePerms.Insert(ivl, struct{}{})
+
+ case authpb.READ:
+ readPerms.Insert(ivl, struct{}{})
+
+ case authpb.WRITE:
+ writePerms.Insert(ivl, struct{}{})
+ }
+ }
+ }
+
+ return &unifiedRangePermissions{
+ readPerms: readPerms,
+ writePerms: writePerms,
+ }
+}
+
+func checkKeyInterval(
+ lg *zap.Logger,
+ cachedPerms *unifiedRangePermissions,
+ key, rangeEnd []byte,
+ permtyp authpb.Permission_Type,
+) bool {
+ if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+ rangeEnd = nil
+ }
+
+ ivl := adt.NewBytesAffineInterval(key, rangeEnd)
+ switch permtyp {
+ case authpb.READ:
+ return cachedPerms.readPerms.Contains(ivl)
+ case authpb.WRITE:
+ return cachedPerms.writePerms.Contains(ivl)
+ default:
+ lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String()))
+ }
+ return false
+}
+
+func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
+ pt := adt.NewBytesAffinePoint(key)
+ switch permtyp {
+ case authpb.READ:
+ return cachedPerms.readPerms.Intersects(pt)
+ case authpb.WRITE:
+ return cachedPerms.writePerms.Intersects(pt)
+ default:
+ lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String()))
+ }
+ return false
+}
+
+func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
+ // assumption: tx is Lock()ed
+ _, ok := as.rangePermCache[userName]
+ if !ok {
+ perms := getMergedPerms(as.lg, tx, userName)
+ if perms == nil {
+ as.lg.Error(
+ "failed to create a merged permission",
+ zap.String("user-name", userName),
+ )
+ return false
+ }
+ as.rangePermCache[userName] = perms
+ }
+
+ if len(rangeEnd) == 0 {
+ return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp)
+ }
+
+ return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp)
+}
+
+func (as *authStore) clearCachedPerm() {
+ as.rangePermCache = make(map[string]*unifiedRangePermissions)
+}
+
+// invalidateCachedPerm drops the user's cached permission info so it is rebuilt on the next check
+func (as *authStore) invalidateCachedPerm(userName string) {
+ delete(as.rangePermCache, userName)
+}
+
+type unifiedRangePermissions struct {
+ readPerms adt.IntervalTree
+ writePerms adt.IntervalTree
+}
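
Editorial note (not part of the patch): isRangeOpPermitted above reduces permission checks to interval-tree queries, so a READ permission granted on the range ["foo", "fop") covers both the single key "foo1" and the sub-range ["foo1", "foo9"). A small sketch of that check using the same adt helpers, written as if it lived inside this auth package:

package auth

import (
	"fmt"

	"github.com/ls-2018/etcd_cn/pkg/adt"
)

func ExampleRangePermSketch() {
	readPerms := adt.NewIntervalTree()
	// Grant READ on ["foo", "fop"), as getMergedPerms would for a role permission.
	readPerms.Insert(adt.NewBytesAffineInterval([]byte("foo"), []byte("fop")), struct{}{})

	// Point check: does a granted interval intersect the key "foo1"?
	fmt.Println(readPerms.Intersects(adt.NewBytesAffinePoint([]byte("foo1"))))

	// Range check: is ["foo1", "foo9") fully contained in a granted interval?
	fmt.Println(readPerms.Contains(adt.NewBytesAffineInterval([]byte("foo1"), []byte("foo9"))))
	// Output:
	// true
	// true
}
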
diff --git a/server/auth/simple_token.go b/etcd/auth/simple_token.go
similarity index 91%
rename from server/auth/simple_token.go
rename to etcd/auth/simple_token.go
index fb9485b4ff9..2788ddbc2a2 100644
--- a/server/auth/simple_token.go
+++ b/etcd/auth/simple_token.go
@@ -20,7 +20,6 @@ package auth
import (
"context"
"crypto/rand"
- "errors"
"fmt"
"math/big"
"strconv"
@@ -75,7 +74,7 @@ func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
}
func (tm *simpleTokenTTLKeeper) run() {
- tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+ tokenTicker := time.NewTicker(simpleTokenTTLResolution) // 1s
defer func() {
tokenTicker.Stop()
close(tm.donec)
@@ -89,6 +88,7 @@ func (tm *simpleTokenTTLKeeper) run() {
if nowtime.After(tokenendtime) {
tm.deleteTokenFunc(t)
delete(tm.tokens, t)
+ // Note: the simple token string itself carries no meaningful information, so the client cannot learn the token's exact expiry time in advance and cannot easily avoid request errors caused by token expiration.
}
}
tm.mu.Unlock()
@@ -157,11 +157,6 @@ func (t *tokenSimple) invalidateUser(username string) {
}
func (t *tokenSimple) enable() {
- t.simpleTokensMu.Lock()
- defer t.simpleTokensMu.Unlock()
- if t.simpleTokenKeeper != nil { // already enabled
- return
- }
if t.simpleTokenTTL <= 0 {
t.simpleTokenTTL = simpleTokenTTLDefault
}
@@ -184,7 +179,7 @@ func (t *tokenSimple) enable() {
mu: &t.simpleTokensMu,
simpleTokenTTL: t.simpleTokenTTL,
}
- go t.simpleTokenKeeper.run()
+ go t.simpleTokenKeeper.run() // periodically checks whether tokens have expired and removes expired tokens from the map.
}
func (t *tokenSimple) disable() {
@@ -213,11 +208,7 @@ func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (
func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
// rev isn't used in simple token, it is only used in JWT
- var index uint64
- var ok bool
- if index, ok = ctx.Value(AuthenticateParamIndex{}).(uint64); !ok {
- return "", errors.New("failed to assign")
- }
+ index := ctx.Value(AuthenticateParamIndex{}).(uint64)
simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
t.assignSimpleTokenToUser(username, token)
@@ -236,7 +227,7 @@ func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool
}
select {
- case <-t.indexWaiter(index):
+ case <-t.indexWaiter(uint64(index)):
return true
case <-ctx.Done():
}
diff --git a/etcd/auth/store.go b/etcd/auth/store.go
new file mode 100644
index 00000000000..54c67fba2c3
--- /dev/null
+++ b/etcd/auth/store.go
@@ -0,0 +1,1229 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/binary"
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/authpb"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+)
+
+var (
+ enableFlagKey = []byte("authEnabled")
+ authEnabled = []byte{1}
+ authDisabled = []byte{0}
+
+ revisionKey = []byte("authRevision") // auth revision number
+
+ ErrRootUserNotExist = errors.New("auth: root user does not exist")
+ ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
+ ErrUserAlreadyExist = errors.New("auth: user already exists")
+ ErrUserEmpty = errors.New("auth: user name is empty")
+ ErrUserNotFound = errors.New("auth: user not found")
+ ErrRoleAlreadyExist = errors.New("auth: role already exists")
+ ErrRoleNotFound = errors.New("auth: role not found")
+ ErrRoleEmpty = errors.New("auth: role name is empty")
+ ErrPermissionNotGiven = errors.New("auth: permission not given")
+ ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
+ ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user")
+ ErrPermissionDenied = errors.New("auth: permission denied")
+ ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
+ ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+ ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
+ ErrAuthOldRevision = errors.New("auth: revision in header is old")
+ ErrInvalidAuthToken = errors.New("auth: invalid auth token")
+ ErrInvalidAuthOpts = errors.New("auth: invalid auth options")
+ ErrInvalidAuthMgmt = errors.New("auth: invalid auth management")
+ ErrInvalidAuthMethod = errors.New("auth: invalid auth signature method")
+ ErrMissingKey = errors.New("auth: missing key data")
+ ErrKeyMismatch = errors.New("auth: public and private keys don't match")
+ ErrVerifyOnly = errors.New("auth: token signing attempted with verify-only key")
+)
+
+const (
+ rootUser = "root"
+ rootRole = "root"
+ tokenTypeSimple = "simple"
+ tokenTypeJWT = "jwt"
+ revBytesLen = 8
+)
+
+type AuthInfo struct {
+ Username string
+ Revision uint64
+}
+
+// AuthenticateParamIndex is used for a key of context in the parameters of Authenticate()
+type AuthenticateParamIndex struct{}
+
+// AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate()
+type AuthenticateParamSimpleTokenPrefix struct{}
+
+type AuthStore interface {
+ AuthEnable() error
+ AuthDisable()
+ IsAuthEnabled() bool
+ Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
+ // Recover recovers the state of auth store from the given backend
+ Recover(b backend.Backend)
+ UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+
+ RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+
+ RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+ IsPutPermitted(authInfo *AuthInfo, key []byte) error
+ IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error // checks the user's permission on the given range
+ IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error //
+ IsAdminPermitted(authInfo *AuthInfo) error //
+ GenTokenPrefix() (string, error) // generates a random string for simple tokens; returns an empty string for JWT
+ Revision() uint64 //
+ CheckPassword(username, password string) (uint64, error) // checks whether the given username/password pair is correct
+ Close() error // cleans up the AuthStore
+ AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) // gets auth info from the gRPC context
+ AuthInfoFromTLS(ctx context.Context) *AuthInfo // gets auth info from the gRPC TLS (certificate) context
+ WithRoot(ctx context.Context) context.Context // generates and installs a token usable as a root credential
+ UserHasRole(user, role string) bool // checks whether the user has the given role
+ BcryptCost() int // returns the hashing cost/strength used for auth passwords
+}
+
+type TokenProvider interface {
+ info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
+ assign(ctx context.Context, username string, revision uint64) (string, error)
+ enable()
+ disable()
+ invalidateUser(string)
+ genTokenPrefix() (string, error)
+}
+
+type authStore struct {
+ revision uint64 // auth revision number
+ lg *zap.Logger //
+ be backend.Backend //
+ enabled bool // whether authentication is enabled
+ enabledMu sync.RWMutex //
+ rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
+ tokenProvider TokenProvider // TODO
+ bcryptCost int // the algorithm cost / strength for hashing auth passwords
+}
+
+func (as *authStore) AuthEnable() error {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if as.enabled {
+ as.lg.Info("authentication is already enabled; ignored auth enable request")
+ return nil
+ }
+ b := as.be
+ tx := b.BatchTx()
+ tx.Lock()
+ defer func() {
+ tx.Unlock()
+ b.ForceCommit()
+ }()
+
+ u := getUser(as.lg, tx, rootUser)
+ if u == nil {
+ return ErrRootUserNotExist
+ }
+
+ if !hasRootRole(u) {
+ return ErrRootRoleNotExist
+ }
+
+ tx.UnsafePut(buckets.Auth, enableFlagKey, authEnabled)
+
+ as.enabled = true
+ as.tokenProvider.enable()
+
+ as.rangePermCache = make(map[string]*unifiedRangePermissions)
+
+ as.setRevision(getRevision(tx))
+
+ as.lg.Info("enabled authentication")
+ return nil
+}
+
+func (as *authStore) AuthDisable() {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if !as.enabled {
+ return
+ }
+ b := as.be
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafePut(buckets.Auth, enableFlagKey, authDisabled)
+ as.commitRevision(tx)
+ tx.Unlock()
+ b.ForceCommit()
+
+ as.enabled = false
+ as.tokenProvider.disable()
+
+ as.lg.Info("disabled authentication")
+}
+
+func (as *authStore) Close() error {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if !as.enabled {
+ return nil
+ }
+ as.tokenProvider.disable()
+ return nil
+}
+
+func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
+ if !as.IsAuthEnabled() {
+ return nil, ErrAuthNotEnabled
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, username)
+ if user == nil {
+ return nil, ErrAuthFailed
+ }
+
+ if user.Options != nil && user.Options.NoPassword {
+ return nil, ErrAuthFailed
+ }
+
+ // The password has already been validated at the API layer, so it does not need to be checked again here.
+ token, err := as.tokenProvider.assign(ctx, username, as.Revision())
+ if err != nil {
+ return nil, err
+ }
+
+ as.lg.Debug("用户认证", zap.String("user-name", username), zap.String("token", token))
+ return &pb.AuthenticateResponse{Token: token}, nil
+}
+
+func (as *authStore) Recover(be backend.Backend) {
+ enabled := false
+ as.be = be
+ tx := be.BatchTx()
+ tx.Lock()
+ _, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0)
+ if len(vs) == 1 {
+ if bytes.Equal(vs[0], authEnabled) {
+ enabled = true
+ }
+ }
+
+ as.setRevision(getRevision(tx))
+
+ tx.Unlock()
+
+ as.enabledMu.Lock()
+ as.enabled = enabled
+ as.enabledMu.Unlock()
+}
+
+func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
+ return as.tokenProvider.info(ctx, token, as.Revision())
+}
+
+func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
+ // This function is expensive, so a caching mechanism is needed.
+ if !as.IsAuthEnabled() {
+ return nil
+ }
+
+ // only gets rev == 0 when passed AuthInfo{}; no user given
+ if revision == 0 {
+ return ErrUserEmpty
+ }
+ rev := as.Revision()
+ if revision < rev {
+ as.lg.Warn("请求认证的版本小于当前节点认证的版本",
+ zap.Uint64("current node auth revision", rev),
+ zap.Uint64("request auth revision", revision),
+ zap.ByteString("request key", key),
+ zap.Error(ErrAuthOldRevision))
+ return ErrAuthOldRevision
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, userName)
+ if user == nil {
+ as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName))
+ return ErrPermissionDenied
+ }
+
+ // root role should have permission on all ranges
+ if hasRootRole(user) {
+ return nil
+ }
+
+ if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
+ return nil
+ }
+
+ return ErrPermissionDenied
+}
+
+func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
+}
+
+func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ) // '' ,0 ,health,nil
+}
+
+func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
+}
+
+func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
+ if !as.IsAuthEnabled() {
+ return nil
+ }
+ if authInfo == nil || authInfo.Username == "" {
+ return ErrUserEmpty
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ u := getUser(as.lg, tx, authInfo.Username)
+ tx.Unlock()
+
+ if u == nil {
+ return ErrUserNotFound
+ }
+
+ if !hasRootRole(u) {
+ return ErrPermissionDenied
+ }
+
+ return nil
+}
+
+// IsAuthEnabled reports whether authentication is enabled.
+func (as *authStore) IsAuthEnabled() bool {
+ as.enabledMu.RLock()
+ defer as.enabledMu.RUnlock()
+ return as.enabled
+}
+
+// NewAuthStore creates a new AuthStore.
+func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ if bcryptCost < bcrypt.MinCost || bcryptCost > bcrypt.MaxCost {
+ lg.Warn(
+ "使用默认的加密强度替换提供的加密强度",
+ zap.Int("min-cost", bcrypt.MinCost),
+ zap.Int("max-cost", bcrypt.MaxCost),
+ zap.Int("default-cost", bcrypt.DefaultCost),
+ zap.Int("given-cost", bcryptCost),
+ )
+ bcryptCost = bcrypt.DefaultCost
+ }
+
+ tx := be.BatchTx()
+ tx.Lock()
+
+ tx.UnsafeCreateBucket(buckets.Auth)
+ tx.UnsafeCreateBucket(buckets.AuthUsers)
+ tx.UnsafeCreateBucket(buckets.AuthRoles)
+
+ enabled := false
+ _, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0)
+ if len(vs) == 1 {
+ if bytes.Equal(vs[0], authEnabled) {
+ enabled = true
+ }
+ }
+
+ as := &authStore{
+ revision: getRevision(tx),
+ lg: lg,
+ be: be,
+ enabled: enabled,
+ rangePermCache: make(map[string]*unifiedRangePermissions),
+ tokenProvider: tp,
+ bcryptCost: bcryptCost,
+ }
+
+ if enabled {
+ as.tokenProvider.enable()
+ }
+
+ if as.Revision() == 0 {
+ as.commitRevision(tx)
+ }
+
+ tx.Unlock()
+ be.ForceCommit()
+
+ return as
+}
+
+func hasRootRole(u *authpb.User) bool {
+ // u.Roles is sorted in UserGrantRole(), so we can use binary search.
+ idx := sort.SearchStrings(u.Roles, rootRole)
+ return idx != len(u.Roles) && u.Roles[idx] == rootRole
+}
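+
+// Illustrative example (hypothetical values): since UserGrantRole keeps Roles sorted,
+// for u.Roles = []string{"admin", "root", "viewer"} sort.SearchStrings returns idx == 1
+// and u.Roles[1] == "root", so hasRootRole reports true.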
+
+// commitRevision increments the auth revision and persists it.
+func (as *authStore) commitRevision(tx backend.BatchTx) {
+ atomic.AddUint64(&as.revision, 1)
+ revBytes := make([]byte, revBytesLen)
+ binary.BigEndian.PutUint64(revBytes, as.Revision())
+ tx.UnsafePut(buckets.Auth, revisionKey, revBytes)
+}
+
+// ok
+func getRevision(tx backend.BatchTx) uint64 {
+ _, vs := tx.UnsafeRange(buckets.Auth, revisionKey, nil, 0)
+ if len(vs) != 1 {
+ return 0
+ }
+ return binary.BigEndian.Uint64(vs[0])
+}
+
+// ok
+
+func (as *authStore) setRevision(rev uint64) {
+ atomic.StoreUint64(&as.revision, rev)
+}
+
+// Revision returns the auth revision number.
+func (as *authStore) Revision() uint64 {
+ return atomic.LoadUint64(&as.revision)
+}
+
+func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) {
+ peer, ok := peer.FromContext(ctx)
+ if !ok || peer == nil || peer.AuthInfo == nil {
+ return nil
+ }
+
+ tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
+ for _, chains := range tlsInfo.State.VerifiedChains {
+ if len(chains) < 1 {
+ continue
+ }
+ ai = &AuthInfo{
+ Username: chains[0].Subject.CommonName,
+ Revision: as.Revision(),
+ }
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return nil
+ }
+
+ // A gRPC-gateway proxy request to the etcd server includes a Grpcgateway-Accept
+ // header. The proxy uses the etcd client server certificate. If the certificate
+ // has a CommonName we should never use this for authentication.
+ if gw := md["grpcgateway-accept"]; len(gw) > 0 {
+ as.lg.Warn(
+ "ignoring common name in gRPC-gateway proxy request",
+ zap.String("common-name", ai.Username),
+ zap.String("user-name", ai.Username),
+ zap.Uint64("revision", ai.Revision),
+ )
+ return nil
+ }
+ as.lg.Debug(
+ "found command name",
+ zap.String("common-name", ai.Username),
+ zap.String("user-name", ai.Username),
+ zap.Uint64("revision", ai.Revision),
+ )
+ break
+ }
+ return ai
+}
+
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return nil, nil
+ }
+
+ // TODO(mitake|hexfusion) review unifying key names
+ ts, ok := md[rpctypes.TokenFieldNameGRPC]
+ if !ok {
+ ts, ok = md[rpctypes.TokenFieldNameSwagger]
+ }
+ if !ok {
+ return nil, nil
+ }
+
+ token := ts[0]
+ authInfo, uok := as.authInfoFromToken(ctx, token)
+ if !uok {
+ as.lg.Warn("invalid auth token", zap.String("token", token))
+ return nil, ErrInvalidAuthToken
+ }
+
+ return authInfo, nil
+}
+
+func (as *authStore) GenTokenPrefix() (string, error) {
+ return as.tokenProvider.genTokenPrefix()
+}
+
+func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, error) {
+ opts := strings.Split(optstr, ",")
+ tokenType := opts[0]
+
+ typeSpecificOpts := make(map[string]string)
+ for i := 1; i < len(opts); i++ {
+ pair := strings.Split(opts[i], "=")
+
+ if len(pair) != 2 {
+ if lg != nil {
+ lg.Error("invalid token option", zap.String("option", optstr))
+ }
+ return "", nil, ErrInvalidAuthOpts
+ }
+
+ if _, ok := typeSpecificOpts[pair[0]]; ok {
+ if lg != nil {
+ lg.Error(
+ "invalid token option",
+ zap.String("option", optstr),
+ zap.String("duplicate-parameter", pair[0]),
+ )
+ }
+ return "", nil, ErrInvalidAuthOpts
+ }
+
+ typeSpecificOpts[pair[0]] = pair[1]
+ }
+
+ return tokenType, typeSpecificOpts, nil
+}
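+
+// Illustrative example (hypothetical file names): for --auth-token values such as
+//
+//	decomposeOpts(lg, "simple")                                                      // -> "simple", {}, nil
+//	decomposeOpts(lg, "jwt,pub-key=app.rsa.pub,priv-key=app.rsa,sign-method=RS256")  // -> "jwt", {"pub-key": "app.rsa.pub", ...}, nil
+//	decomposeOpts(lg, "jwt,ttl=5m,ttl=10m")                                          // -> ErrInvalidAuthOpts (duplicate parameter)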
+
+func NewTokenProvider(lg *zap.Logger, tokenOpts string, indexWaiter func(uint64) <-chan struct{}, TokenTTL time.Duration) (TokenProvider, error) { // token provider
+ tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts) // token type: simple or jwt
+ if err != nil {
+ return nil, ErrInvalidAuthOpts
+ }
+
+ switch tokenType {
+ case tokenTypeSimple:
+ if lg != nil {
+ lg.Warn("简单令牌没有经过加密签名")
+ }
+ return newTokenProviderSimple(lg, indexWaiter, TokenTTL), nil
+
+ case tokenTypeJWT:
+ return newTokenProviderJWT(lg, typeSpecificOpts)
+
+ case "":
+ return newTokenProviderNop()
+
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unknown token type",
+ zap.String("type", tokenType),
+ zap.Error(ErrInvalidAuthOpts),
+ )
+ }
+ return nil, ErrInvalidAuthOpts
+ }
+}
+
+func (as *authStore) WithRoot(ctx context.Context) context.Context {
+ if !as.IsAuthEnabled() {
+ return ctx
+ }
+
+ var ctxForAssign context.Context
+ if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil {
+ ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0))
+ prefix, err := ts.genTokenPrefix()
+ if err != nil {
+ as.lg.Error(
+ "failed to generate prefix of internally used token",
+ zap.Error(err),
+ )
+ return ctx
+ }
+ ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix)
+ } else {
+ ctxForAssign = ctx
+ }
+
+ token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision())
+ if err != nil {
+ // this must not happen
+ as.lg.Error(
+ "failed to assign token for lease revoking",
+ zap.Error(err),
+ )
+ return ctx
+ }
+
+ mdMap := map[string]string{
+ rpctypes.TokenFieldNameGRPC: token,
+ }
+ tokenMD := metadata.New(mdMap)
+
+ // use "mdIncomingKey{}" since it's called from local etcdserver
+ return metadata.NewIncomingContext(ctx, tokenMD)
+}
+
+func (as *authStore) BcryptCost() int {
+ return as.bcryptCost
+}
+
+// ---------------------------------------------------------------------------------------------------v
+
+// RoleRevokePermission ok
+func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(as.lg, tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ updatedRole := &authpb.Role{
+ Name: role.Name,
+ }
+
+ for _, perm := range role.KeyPermission {
+ if !strings.EqualFold(perm.Key, r.Key) || !strings.EqualFold(perm.RangeEnd, r.RangeEnd) {
+ updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
+ }
+ }
+
+ if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
+ return nil, ErrPermissionNotGranted
+ }
+
+ putRole(as.lg, tx, updatedRole)
+
+ as.clearCachedPerm()
+ as.commitRevision(tx)
+
+ as.lg.Info("撤销对range的权限", zap.String("role-name", r.Role), zap.String("key", r.Key), zap.String("range-end", r.RangeEnd))
+ return &pb.AuthRoleRevokePermissionResponse{}, nil
+}
+
+// RoleGrantPermission ok
+func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ if r.Perm == nil {
+ return nil, ErrPermissionNotGiven
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(as.lg, tx, r.Name)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+ // Among the existing permissions, find the first entry whose key is >= the requested key;
+ // if none matches, idx == len(role.KeyPermission).
+ idx := sort.Search(len(role.KeyPermission), func(i int) bool {
+ // strings.Compare: "a","a" -> 0; "a","b" -> -1; "b","a" -> 1.
+ // e.g. searching for "c" among keys a,b,c,d,e stops at index 2.
+ return strings.Compare(role.KeyPermission[i].Key, r.Perm.Key) >= 0
+ })
+
+ if idx < len(role.KeyPermission) && strings.EqualFold(role.KeyPermission[idx].Key, r.Perm.Key) && strings.EqualFold(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
+ // update the existing permission
+ role.KeyPermission[idx].PermType = r.Perm.PermType
+ } else {
+ newPerm := &authpb.Permission{
+ Key: r.Perm.Key, // /
+ RangeEnd: r.Perm.RangeEnd, // ""
+ PermType: r.Perm.PermType, // readwrite
+ }
+
+ role.KeyPermission = append(role.KeyPermission, newPerm)
+ // keep permissions sorted by key in ascending order
+ sort.Sort(permSlice(role.KeyPermission))
+ }
+
+ putRole(as.lg, tx, role)
+ // Currently a single role update invalidates the entire cache; this should be optimized.
+ as.clearCachedPerm()
+
+ as.commitRevision(tx)
+
+ as.lg.Info("授予/更新用户权限", zap.String("user-name", r.Name), zap.String("permission-name", authpb.PermissionTypeName[int32(r.Perm.PermType)]))
+ return &pb.AuthRoleGrantPermissionResponse{}, nil
+}
+
+// RoleList ok
+func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ roles := getAllRoles(as.lg, tx)
+ tx.Unlock()
+
+ resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
+ for i := range roles {
+ resp.Roles[i] = string(roles[i].Name)
+ }
+ return resp, nil
+}
+
+// RoleDelete OK
+func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ if as.enabled && r.Role == rootRole {
+ as.lg.Error("不能删除 'root' 角色", zap.String("role-name", r.Role))
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(as.lg, tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ delRole(tx, r.Role)
+
+ users := getAllUsers(as.lg, tx) // fetch all users
+ for _, user := range users {
+ updatedUser := &authpb.User{
+ Name: user.Name,
+ Password: user.Password,
+ Options: user.Options,
+ }
+ for _, role := range user.Roles {
+ if role != r.Role {
+ updatedUser.Roles = append(updatedUser.Roles, role)
+ }
+ }
+ if len(updatedUser.Roles) == len(user.Roles) {
+ continue
+ }
+ putUser(as.lg, tx, updatedUser)
+ as.invalidateCachedPerm(user.Name)
+ }
+
+ as.commitRevision(tx)
+
+ as.lg.Info("删除了一个角色", zap.String("role-name", r.Role))
+ return &pb.AuthRoleDeleteResponse{}, nil
+}
+
+// RoleAdd OK
+func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ if len(r.Name) == 0 {
+ return nil, ErrRoleEmpty
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(as.lg, tx, r.Name)
+ if role != nil {
+ return nil, ErrRoleAlreadyExist
+ }
+
+ newRole := &authpb.Role{
+ Name: r.Name,
+ }
+
+ putRole(as.lg, tx, newRole)
+
+ as.commitRevision(tx)
+
+ as.lg.Info("创建了一个角色", zap.String("role-name", r.Name))
+ return &pb.AuthRoleAddResponse{}, nil
+}
+
+// RoleGet ok
+func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ var resp pb.AuthRoleGetResponse
+
+ role := getRole(as.lg, tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+ resp.Perm = append(resp.Perm, role.KeyPermission...)
+ return &resp, nil
+}
+
+func (as *authStore) UserHasRole(user, role string) bool {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ u := getUser(as.lg, tx, user)
+ tx.Unlock()
+
+ if u == nil {
+ as.lg.Warn("'has-role'请求不存在的用户", zap.String("user-name", user), zap.String("role-name", role))
+ return false
+ }
+
+ for _, r := range u.Roles {
+ if role == r {
+ return true
+ }
+ }
+ return false
+}
+
+func getRole(lg *zap.Logger, tx backend.BatchTx, rolename string) *authpb.Role {
+ _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte(rolename), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[0])
+ if err != nil {
+ lg.Panic("反序列化失败 'authpb.Role'", zap.Error(err))
+ }
+ return role
+}
+
+func getAllRoles(lg *zap.Logger, tx backend.BatchTx) []*authpb.Role {
+ _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte{0}, []byte{0xff}, -1)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ roles := make([]*authpb.Role, len(vs))
+ for i := range vs {
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[i])
+ if err != nil {
+ lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
+ }
+ roles[i] = role
+ }
+ return roles
+}
+
+// ok
+func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) {
+ b, err := role.Marshal()
+ if err != nil {
+ lg.Panic("序列化失败'authpb.Role'", zap.String("role-name", role.Name), zap.Error(err))
+ }
+
+ tx.UnsafePut(buckets.AuthRoles, []byte(role.Name), b)
+}
+
+// ok
+func delRole(tx backend.BatchTx, rolename string) {
+ tx.UnsafeDelete(buckets.AuthRoles, []byte(rolename))
+}
+
+type permSlice []*authpb.Permission
+
+func (perms permSlice) Len() int {
+ return len(perms)
+}
+
+func (perms permSlice) Less(i, j int) bool {
+ // strings.Compare: "a","a" -> 0; "a","b" -> -1; "b","a" -> 1.
+
+ return strings.Compare(perms[i].Key, perms[j].Key) < 0
+}
+
+func (perms permSlice) Swap(i, j int) {
+ perms[i], perms[j] = perms[j], perms[i]
+}
+
+// ---------------------------------------------------------------------------------------------------v
+
+// selectPassword returns the password hash to persist for the user.
+func (as *authStore) selectPassword(password string, hashedPassword string) ([]byte, error) {
+ if password != "" && hashedPassword == "" {
+ // This path handles log entries created by etcd versions older than 3.5, which carry a plaintext password instead of a hash.
+ return bcrypt.GenerateFromPassword([]byte(password), as.bcryptCost)
+ }
+ return base64.StdEncoding.DecodeString(hashedPassword)
+}
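+
+// Illustrative sketch (hypothetical values): a v3.5+ request such as
+//
+//	AuthUserAddRequest{Name: "alice", HashedPassword: base64(bcrypt("secret"))}
+//
+// only needs the base64 decode above, while an older log entry carrying
+// AuthUserAddRequest{Name: "alice", Password: "secret"} is bcrypt-hashed here with as.bcryptCost.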
+
+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+ if !as.IsAuthEnabled() {
+ return 0, ErrAuthNotEnabled
+ }
+
+ var user *authpb.User
+ // CompareHashAndPassword is very expensive, so we use closures
+ // to avoid putting it in the critical section of the tx lock.
+ revision, err := func() (uint64, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user = getUser(as.lg, tx, username)
+ if user == nil {
+ return 0, ErrAuthFailed
+ }
+
+ if user.Options != nil && user.Options.NoPassword {
+ return 0, ErrNoPasswordUser
+ }
+
+ return getRevision(tx), nil
+ }()
+ if err != nil {
+ return 0, err
+ }
+
+ if bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)) != nil {
+ as.lg.Info("invalid password", zap.String("user-name", username))
+ return 0, ErrAuthFailed
+ }
+ return revision, nil
+}
+
+func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ if len(r.Name) == 0 {
+ return nil, ErrUserEmpty
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user != nil {
+ return nil, ErrUserAlreadyExist
+ }
+
+ options := r.Options
+ if options == nil {
+ options = &authpb.UserAddOptions{
+ NoPassword: false,
+ }
+ }
+
+ var password []byte
+ var err error
+
+ if !options.NoPassword {
+ password, err = as.selectPassword(r.Password, r.HashedPassword)
+ if err != nil {
+ return nil, ErrNoPasswordUser
+ }
+ }
+
+ newUser := &authpb.User{
+ Name: r.Name,
+ Password: string(password),
+ Options: options,
+ }
+
+ putUser(as.lg, tx, newUser)
+
+ as.commitRevision(tx)
+
+ as.lg.Info("添加一个用户", zap.String("user-name", r.Name))
+ return &pb.AuthUserAddResponse{}, nil
+}
+
+func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ if as.enabled && r.Name == rootUser {
+ as.lg.Error("不能删除 'root' 用户", zap.String("user-name", r.Name))
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ delUser(tx, r.Name)
+
+ as.commitRevision(tx)
+
+ as.invalidateCachedPerm(r.Name)
+ as.tokenProvider.invalidateUser(r.Name)
+
+ as.lg.Info(
+ "删除了一个用户",
+ zap.String("user-name", r.Name),
+ zap.Strings("user-roles", user.Roles),
+ )
+ return &pb.AuthUserDeleteResponse{}, nil
+}
+
+func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ var password []byte
+ var err error
+
+ if !user.Options.NoPassword {
+ password, err = as.selectPassword(r.Password, r.HashedPassword)
+ if err != nil {
+ return nil, ErrNoPasswordUser
+ }
+ }
+
+ updatedUser := &authpb.User{
+ Name: r.Name,
+ Roles: user.Roles,
+ Password: string(password),
+ Options: user.Options,
+ }
+
+ putUser(as.lg, tx, updatedUser)
+
+ as.commitRevision(tx)
+
+ as.invalidateCachedPerm(r.Name)
+ as.tokenProvider.invalidateUser(r.Name)
+
+ as.lg.Info(
+ "更该用户密码",
+ zap.String("user-name", r.Name),
+ zap.Strings("user-roles", user.Roles),
+ )
+ return &pb.AuthUserChangePasswordResponse{}, nil
+}
+
+func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.User)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ if r.Role != rootRole {
+ role := getRole(as.lg, tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+ }
+
+ idx := sort.SearchStrings(user.Roles, r.Role)
+ if idx < len(user.Roles) && user.Roles[idx] == r.Role {
+ as.lg.Warn(
+ "ignored grant role request to a user",
+ zap.String("user-name", r.User),
+ zap.Strings("user-roles", user.Roles),
+ zap.String("duplicate-role-name", r.Role),
+ )
+ return &pb.AuthUserGrantRoleResponse{}, nil
+ }
+
+ user.Roles = append(user.Roles, r.Role)
+ sort.Strings(user.Roles)
+
+ putUser(as.lg, tx, user)
+
+ as.invalidateCachedPerm(r.User)
+
+ as.commitRevision(tx)
+
+ as.lg.Info(
+ "granted a role to a user",
+ zap.String("user-name", r.User),
+ zap.Strings("user-roles", user.Roles),
+ zap.String("added-role-name", r.Role),
+ )
+ return &pb.AuthUserGrantRoleResponse{}, nil
+}
+
+func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ user := getUser(as.lg, tx, r.Name)
+ tx.Unlock()
+
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ var resp pb.AuthUserGetResponse
+ resp.Roles = append(resp.Roles, user.Roles...)
+ return &resp, nil
+}
+
+func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ users := getAllUsers(as.lg, tx)
+ tx.Unlock()
+
+ resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
+ for i := range users {
+ resp.Users[i] = users[i].Name
+ }
+ return resp, nil
+}
+
+func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ if as.enabled && r.Name == rootUser && r.Role == rootRole {
+ as.lg.Error(
+ "'root'用户 不能移除 'root' 角色",
+ zap.String("user-name", r.Name),
+ zap.String("role-name", r.Role),
+ )
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ updatedUser := &authpb.User{
+ Name: user.Name,
+ Password: user.Password,
+ Options: user.Options,
+ }
+
+ for _, role := range user.Roles {
+ if role != r.Role {
+ updatedUser.Roles = append(updatedUser.Roles, role)
+ }
+ }
+
+ if len(updatedUser.Roles) == len(user.Roles) {
+ return nil, ErrRoleNotGranted
+ }
+
+ putUser(as.lg, tx, updatedUser)
+
+ as.invalidateCachedPerm(r.Name)
+
+ as.commitRevision(tx)
+
+ as.lg.Info(
+ "移除用户角色",
+ zap.String("user-name", r.Name),
+ zap.Strings("old-user-roles", user.Roles),
+ zap.Strings("new-user-roles", updatedUser.Roles),
+ zap.String("revoked-role-name", r.Role),
+ )
+ return &pb.AuthUserRevokeRoleResponse{}, nil
+}
+
+func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User {
+ _, vs := tx.UnsafeRange(buckets.AuthUsers, []byte(username), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[0])
+ if err != nil {
+ lg.Panic(
+ "failed to unmarshal 'authpb.User'",
+ zap.String("user-name", username),
+ zap.Error(err),
+ )
+ }
+ return user
+}
+
+// getAllUsers fetches all users.
+func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User {
+ _, vs := tx.UnsafeRange(buckets.AuthUsers, []byte{0}, []byte{0xff}, -1)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ users := make([]*authpb.User, len(vs))
+ for i := range vs {
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[i])
+ if err != nil {
+ lg.Panic("不能反序列化 'authpb.User'", zap.Error(err))
+ }
+ users[i] = user
+ }
+ return users
+}
+
+// OK
+func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) {
+ b, err := user.Marshal()
+ if err != nil {
+ lg.Panic("序列化失败 'authpb.User'", zap.Error(err))
+ }
+ tx.UnsafePut(buckets.AuthUsers, []byte(user.Name), b)
+}
+
+func delUser(tx backend.BatchTx, username string) {
+ tx.UnsafeDelete(buckets.AuthUsers, []byte(username))
+}
diff --git a/etcd/config/over_config.go b/etcd/config/over_config.go
new file mode 100644
index 00000000000..cdc26c1e9bb
--- /dev/null
+++ b/etcd/config/over_config.go
@@ -0,0 +1,287 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/datadir"
+ "github.com/ls-2018/etcd_cn/pkg/netutil"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
+type ServerConfig struct {
+ Name string
+ DiscoveryURL string // member discovery
+ DiscoveryProxy string // HTTP proxy for the discovery service
+ ClientURLs types.URLs
+ PeerURLs types.URLs
+ DataDir string
+ DedicatedWALDir string // if set, etcd writes the WAL to this directory instead of dataDir/member/wal.
+ SnapshotCount uint64 // number of committed transactions that triggers a snapshot to disk
+ SnapshotCatchUpEntries uint64 // number of entries for a slow follower to catch up after the raft storage is compacted. We expect the follower to lag behind the leader by only milliseconds; maximum throughput is roughly 10K entries/s, so keeping 5K entries is enough for a follower to catch up.
+ MaxSnapFiles uint
+ MaxWALFiles uint
+ BackendBatchInterval time.Duration // maximum time before committing a backend transaction
+ BackendBatchLimit int // maximum number of operations before committing a backend transaction
+ BackendFreelistType bolt.FreelistType // boltdb freelist type
+ InitialPeerURLsMap types.URLsMap // member -> peer URLs; a member may be bound to multiple network interfaces
+ InitialClusterToken string
+ NewCluster bool
+ PeerTLSInfo transport.TLSInfo
+ CORS map[string]struct{}
+ HostWhitelist map[string]struct{} // lists the acceptable hostnames in client requests. If etcd is insecure (no TLS), it only accepts requests whose Host header value is in this whitelist.
+
+ TickMs uint // interval at which the tick timer fires
+ ElectionTicks int // number of ticks that corresponds to one election timeout check
+
+ // InitialElectionTickAdvance is whether to fast-forward the initial election ticks so that the first election happens sooner
+ InitialElectionTickAdvance bool
+
+ BootstrapTimeout time.Duration // bootstrap timeout
+
+ AutoCompactionRetention time.Duration
+ AutoCompactionMode string
+
+ CompactionBatchLimit int
+ QuotaBackendBytes int64 // bolt.db storage limit, in bytes
+ MaxTxnOps uint // maximum number of operations permitted in a transaction
+
+ // MaxRequestBytes is the maximum amount of data sent through raft
+ MaxRequestBytes uint
+
+ WarningApplyDuration time.Duration
+
+ StrictReconfigCheck bool // strict checking of cluster reconfiguration requests
+ ClientCertAuthEnabled bool // verify that client certificates are signed by the server CA
+ AuthToken string // auth token type: simple or jwt
+ BcryptCost uint // bcrypt cost/strength for hashing auth passwords; default 10
+ TokenTTL uint
+
+ InitialCorruptCheck bool // data corruption check, run after boot and before serving
+ CorruptCheckTime time.Duration
+
+ PreVote bool // PreVote is whether to enable the raft Pre-Vote phase
+
+ // SocketOpts are socket options passed to listener config.
+ SocketOpts transport.SocketOpts
+
+ // Logger logs etcd-side operations.
+ Logger *zap.Logger
+
+ ForceNewCluster bool
+
+ EnableLeaseCheckpoint bool // allows the leader to periodically send lease checkpoints to other members, so remaining TTLs are not reset when the leader changes.
+ // LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints.
+ LeaseCheckpointInterval time.Duration
+ // LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
+ LeaseCheckpointPersist bool
+
+ EnableGRPCGateway bool // enable the gRPC gateway that translates HTTP into gRPC; default true
+
+ // ExperimentalEnableDistributedTracing enables distributed tracing using the OpenTelemetry protocol.
+ ExperimentalEnableDistributedTracing bool // default false
+ // ExperimentalTracerOptions are options for OpenTelemetry gRPC interceptor.
+ ExperimentalTracerOptions []otelgrpc.Option
+
+ WatchProgressNotifyInterval time.Duration
+
+ // UnsafeNoFsync disables all use of fsync. Setting this is unsafe and may cause data loss.
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"`
+
+ DowngradeCheckTime time.Duration
+
+ // ExperimentalMemoryMlock enables mlocking of etcd owned memory pages.
+ // The setting improves etcd tail latency in environments where:
+ // - memory pressure might lead to swapping pages to disk
+ // - disk latency might be unstable
+ // Currently all etcd memory gets mlocked, but in future the flag can
+ // be refined to mlock in-use area of bbolt only.
+ ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"`
+
+ // ExperimentalTxnModeWriteWithSharedBuffer enable write transaction to use
+ // a shared buffer in its readonly check operations.
+ ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"`
+
+ // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes that must be freeable for etcd to consider running defrag during bootstrap. Needs a non-zero value to take effect.
+ ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"`
+
+ // V2Deprecation defines a phase of v2store deprecation process.
+ V2Deprecation V2DeprecationEnum `json:"v2-deprecation"`
+}
+
+// VerifyBootstrap checks the initial bootstrap configuration and returns an error for things that should not happen.
+func (c *ServerConfig) VerifyBootstrap() error {
+ if err := c.hasLocalMember(); err != nil { // the initial-cluster must contain the local member
+ return err
+ }
+ // mainly validates these two flags: --initial-advertise-peer-urls and --initial-cluster
+ if err := c.advertiseMatchesCluster(); err != nil {
+ return err
+ }
+ // check all ip:port pairs for duplicates; returns true if any are duplicated
+ if CheckDuplicateURL(c.InitialPeerURLsMap) {
+ return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
+ }
+ if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
+ return fmt.Errorf("initial cluster unset and no discovery URL found")
+ }
+ return nil
+}
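+
+// Illustrative example (hypothetical values): a single-node bootstrap that passes these checks
+// could be started with
+//
+//	etcd --name infra0 \
+//	  --initial-advertise-peer-urls http://10.0.0.1:2380 \
+//	  --initial-cluster infra0=http://10.0.0.1:2380 \
+//	  --initial-cluster-state new
+//
+// hasLocalMember finds "infra0" in --initial-cluster, advertiseMatchesCluster sees the same URL
+// in both flags, and CheckDuplicateURL finds no repeated ip:port.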
+
+// VerifyJoinExisting checks the initial configuration for joining an existing cluster and returns an error for things that should not happen.
+func (c *ServerConfig) VerifyJoinExisting() error {
+ if err := c.hasLocalMember(); err != nil {
+ return err
+ }
+ if CheckDuplicateURL(c.InitialPeerURLsMap) {
+ return fmt.Errorf("初始集群 %s 有重复的地址", c.InitialPeerURLsMap)
+ }
+ if c.DiscoveryURL != "" {
+ return fmt.Errorf("discovery URL 不应该设置,当加入一个存在的初始集群")
+ }
+ return nil
+}
+
+// hasLocalMember checks that the cluster contains at least the local member.
+func (c *ServerConfig) hasLocalMember() error {
+ if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
+ return fmt.Errorf("不能再集群配置中发现本机 %q", c.Name)
+ }
+ return nil
+}
+
+// advertiseMatchesCluster confirms that the advertised peer URLs match those listed for this member in the initial cluster.
+func (c *ServerConfig) advertiseMatchesCluster() error {
+ urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
+ urls.Sort()
+ sort.Strings(apurls)
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+ ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice())
+ if ok {
+ return nil
+ }
+
+ initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
+ for _, url := range c.PeerURLs {
+ apMap[url.String()] = struct{}{}
+ }
+ for _, url := range c.InitialPeerURLsMap[c.Name] {
+ initMap[url.String()] = struct{}{}
+ }
+
+ var missing []string
+ for url := range initMap {
+ if _, ok := apMap[url]; !ok {
+ missing = append(missing, url)
+ }
+ }
+ if len(missing) > 0 {
+ for i := range missing {
+ missing[i] = c.Name + "=" + missing[i]
+ }
+ mstr := strings.Join(missing, ",")
+ apStr := strings.Join(apurls, ",")
+ return fmt.Errorf("--initial-cluster 有 %s但丢失了--initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
+ }
+
+ for url := range apMap {
+ if _, ok := initMap[url]; !ok {
+ missing = append(missing, url)
+ }
+ }
+ if len(missing) > 0 {
+ mstr := strings.Join(missing, ",")
+ umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+ return fmt.Errorf("--initial-advertise-peer-urls 有 %s但丢失了--initial-cluster=%s", mstr, umap.String())
+ }
+
+ // resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
+ apStr := strings.Join(apurls, ",")
+ umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+ return fmt.Errorf("无法解决 %s 匹配--initial-cluster=%s 的问题(%v)", apStr, umap.String(), err)
+}
+
+// MemberDir default.etcd/member
+func (c *ServerConfig) MemberDir() string {
+ return datadir.ToMemberDir(c.DataDir)
+}
+
+// WALDir default.etcd/member/wal
+func (c *ServerConfig) WALDir() string {
+ if c.DedicatedWALDir != "" { // ""
+ return c.DedicatedWALDir
+ }
+ return datadir.ToWalDir(c.DataDir)
+}
+
+// SnapDir default.etcd/member/snap
+func (c *ServerConfig) SnapDir() string {
+ return datadir.ToSnapDir(c.DataDir)
+}
+
+func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
+
+// ReqTimeout returns the timeout for a request to finish.
+func (c *ServerConfig) ReqTimeout() time.Duration {
+ // 5s for queue waiting, computation and disk IO delay, plus 2 * election timeout
+ return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
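+
+// Worked example (default settings assumed): with TickMs = 100 (heartbeat-interval 100ms) and
+// ElectionTicks = 10 (election-timeout 1000ms), ReqTimeout() = 5s + 2*1s = 7s,
+// ElectionTimeout() = 1s and PeerDialTimeout() = 1s + 1s = 2s.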
+
+// ElectionTimeout returns the election timeout.
+func (c *ServerConfig) ElectionTimeout() time.Duration {
+ return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
+}
+
+func (c *ServerConfig) PeerDialTimeout() time.Duration {
+ return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
+
+// CheckDuplicateURL returns true if any ip:port appears more than once in the URL map.
+func CheckDuplicateURL(urlsmap types.URLsMap) bool {
+ um := make(map[string]bool)
+ for _, urls := range urlsmap {
+ for _, url := range urls {
+ u := url.String()
+ if um[u] {
+ return true
+ }
+ um[u] = true
+ }
+ }
+ return false
+}
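+
+// Illustrative example (hypothetical values):
+//
+//	urlsmap, _ := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.1:2380")
+//	CheckDuplicateURL(urlsmap) // true: the same ip:port is advertised by two members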
+
+// BootstrapTimeoutEffective returns the effective bootstrap timeout.
+func (c *ServerConfig) BootstrapTimeoutEffective() time.Duration {
+ if c.BootstrapTimeout != 0 {
+ return c.BootstrapTimeout
+ }
+ return time.Second
+}
+
+// BackendPath default.etcd/member/snap/bolt.db
+func (c *ServerConfig) BackendPath() string { return datadir.ToBackendFileName(c.DataDir) }
diff --git a/server/config/v2_deprecation.go b/etcd/config/v2_deprecation.go
similarity index 93%
rename from server/config/v2_deprecation.go
rename to etcd/config/v2_deprecation.go
index 862c3bb9343..828bd9a8f43 100644
--- a/server/config/v2_deprecation.go
+++ b/etcd/config/v2_deprecation.go
@@ -17,7 +17,7 @@ package config
type V2DeprecationEnum string
const (
- // No longer supported in v3.6
+ // Default in v3.5. Issues a warning if v2store have meaningful content.
V2_DEPR_0_NOT_YET = V2DeprecationEnum("not-yet")
// Default in v3.6. Meaningful v2 state is not allowed.
// The V2 files are maintained for v3.5 rollback.
@@ -28,7 +28,7 @@ const (
// ability to rollback to etcd v3.5.
V2_DEPR_2_GONE = V2DeprecationEnum("gone")
- V2_DEPR_DEFAULT = V2_DEPR_1_WRITE_ONLY
+ V2_DEPR_DEFAULT = V2_DEPR_0_NOT_YET
)
func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool {
diff --git a/etcd/datadir/over_datadir.go b/etcd/datadir/over_datadir.go
new file mode 100644
index 00000000000..4e5e3cb4082
--- /dev/null
+++ b/etcd/datadir/over_datadir.go
@@ -0,0 +1,41 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datadir
+
+import "path/filepath"
+
+const (
+ memberDirSegment = "member"
+ snapDirSegment = "snap"
+ walDirSegment = "wal"
+ backendFileSegment = "bolt.db"
+)
+
+func ToBackendFileName(dataDir string) string {
+ return filepath.Join(ToSnapDir(dataDir), backendFileSegment) // default.etcd/member/snap/bolt.db
+}
+
+// ToSnapDir returns the snapshot directory path.
+func ToSnapDir(dataDir string) string {
+ return filepath.Join(ToMemberDir(dataDir), snapDirSegment) // default.etcd/member/snap
+}
+
+func ToWalDir(dataDir string) string {
+ return filepath.Join(ToMemberDir(dataDir), walDirSegment) // default.etcd/member/wal
+}
+
+func ToMemberDir(dataDir string) string {
+ return filepath.Join(dataDir, memberDirSegment) // default.etcd/member
+}
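+
+// For the default data dir these helpers resolve to (sketch):
+//
+//	ToMemberDir("default.etcd")       -> default.etcd/member
+//	ToWalDir("default.etcd")          -> default.etcd/member/wal
+//	ToSnapDir("default.etcd")         -> default.etcd/member/snap
+//	ToBackendFileName("default.etcd") -> default.etcd/member/snap/bolt.db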
diff --git a/etcd/embed/config.go b/etcd/embed/config.go
new file mode 100644
index 00000000000..559ea95a711
--- /dev/null
+++ b/etcd/embed/config.go
@@ -0,0 +1,935 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/srv"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/tlsutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/config"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3compactor"
+ "github.com/ls-2018/etcd_cn/pkg/flags"
+ "github.com/ls-2018/etcd_cn/pkg/netutil"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc"
+ "sigs.k8s.io/yaml"
+)
+
+const (
+ // Set to "new" for all members present during initial static or DNS bootstrapping. If set to "existing", etcd attempts to join an existing cluster.
+ ClusterStateFlagNew = "new"
+ ClusterStateFlagExisting = "existing"
+
+ DefaultName = "default"
+ DefaultMaxSnapshots = 5
+ DefaultMaxWALs = 5
+ DefaultMaxTxnOps = uint(128)
+ DefaultWarningApplyDuration = 100 * time.Millisecond
+ DefaultMaxRequestBytes = 1.5 * 1024 * 1024
+ DefaultGRPCKeepAliveMinTime = 5 * time.Second
+ DefaultGRPCKeepAliveInterval = 2 * time.Hour
+ DefaultGRPCKeepAliveTimeout = 20 * time.Second
+ DefaultDowngradeCheckTime = 5 * time.Second
+
+ DefaultListenPeerURLs = "http://localhost:2380"
+ DefaultListenClientURLs = "http://localhost:2379"
+
+ DefaultLogOutput = "default"
+ JournalLogOutput = "systemd/journal"
+ StdErrLogOutput = "stderr"
+ StdOutLogOutput = "stdout"
+
+ // DefaultLogRotationConfig is the default configuration used for log rotation. Log rotation is disabled by default.
+ // MaxSize = 100 // MB
+ // MaxAge = 0 // days (no limit)
+ // MaxBackups = 0 // no limit
+ // LocalTime = false // use the computer's local time, UTC by default
+ // Compress = false // compress the rotated log in gzip format
+ DefaultLogRotationConfig = `{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}`
+
+ // ExperimentalDistributedTracingAddress is the default collector address.
+ ExperimentalDistributedTracingAddress = "localhost:4317"
+ // ExperimentalDistributedTracingServiceName is the default etcd service name.
+ ExperimentalDistributedTracingServiceName = "etcd"
+
+ // DefaultStrictReconfigCheck rejects reconfiguration requests that might cause quorum loss.
+ DefaultStrictReconfigCheck = true
+
+ // maxElectionMs specifies the maximum value of election timeout.
+ // More details are listed in ../Documentation/tuning.md#time-parameters.
+ maxElectionMs = 50000
+ // backend freelist map type
+ freelistArrayType = "array"
+)
+
+var (
+ ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
+ "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"")
+ ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
+ ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined")
+
+ DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
+ DefaultAdvertiseClientURLs = "http://localhost:2379"
+
+ // netutil.GetDefaultHost()
+ defaultHostname string
+ defaultHostStatus error
+
+ // indirection for testing
+ getCluster = srv.GetCluster
+)
+
+var (
+ // CompactorModePeriodic
+ // periodic compaction, e.g. every 1h
+ CompactorModePeriodic = v3compactor.ModePeriodic
+
+ // CompactorModeRevision: with "AutoCompactionRetention" set to "1000",
+ // when the current revision is 6000 it compacts the log up to revision 5000.
+ // If there are enough revisions, this runs every 5 minutes.
+ CompactorModeRevision = v3compactor.ModeRevision
+)
+
+func init() {
+ defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
+ fmt.Println("defaultHostname", defaultHostname)
+ fmt.Println("defaultHostStatus", defaultHostStatus)
+ // defaultHostname 172.17.0.2
+ // defaultHostStatus
+}
+
+// Config holds the parameters for configuring an etcd server.
+type Config struct {
+ Name string `json:"name"` // name of this member
+ Dir string `json:"data-dir"` // data directory
+ // Setting a dedicated WAL directory makes etcd write WAL files to --wal-dir instead of --data-dir; a separate WAL path helps avoid contention between logging and other IO operations.
+ WalDir string `json:"wal-dir"` // path of the dedicated WAL directory.
+
+ SnapshotCount uint64 `json:"snapshot-count"` // number of committed transactions that triggers a snapshot to disk
+
+ // SnapshotCatchUpEntries is the number of entries for a slow follower to catch up after the raft storage entries are compacted.
+ // We expect the follower to lag behind the leader by only milliseconds; maximum throughput is roughly 10K entries/s, so keeping 5K entries is enough to help a follower catch up.
+ SnapshotCatchUpEntries uint64
+
+ MaxSnapFiles uint `json:"max-snapshots"` // maximum number of snapshot files to retain
+ MaxWalFiles uint `json:"max-wals"` // maximum number of WAL files to retain (0 is unlimited); default 5
+
+ // TickMs is the number of milliseconds between heartbeat ticks.
+ // TODO: decouple tickMs and heartbeat tick (currently heartbeat tick = 1)
+ // and make tick a cluster-wide configuration.
+ TickMs uint `json:"heartbeat-interval"` // heartbeat timer interval, 100ms
+ ElectionMs uint `json:"election-timeout"` // election timeout check period, 1s
+
+ // InitialElectionTickAdvance is true, then local member fast-forwards
+ // election ticks to speed up "initial" leader election trigger. This
+ // benefits the case of larger election ticks. For instance, cross
+ // datacenter deployment may require longer election timeout of 10-second.
+ // If true, local node does not need wait up to 10-second. Instead,
+ // forwards its election ticks to 8-second, and have only 2-second left
+ // before leader election.
+ //
+ // Major assumptions are that:
+ // - cluster has no active leader thus advancing ticks enables faster
+ // leader election, or
+ // - cluster already has an established leader, and rejoining follower
+ // is likely to receive heartbeats from the leader after tick advance
+ // and before election timeout.
+ //
+ // However, when network from leader to rejoining follower is congested,
+ // and the follower does not receive leader heartbeat within left election
+ // ticks, disruptive election has to happen thus affecting cluster
+ // availabilities.
+ //
+ // Disabling this would slow down initial bootstrap process for cross
+ // datacenter deployments. Make your own tradeoffs by configuring
+ // --initial-election-tick-advance at the cost of slow initial bootstrap.
+ //
+ // If single-node, it advances ticks regardless.
+ //
+ // See https://github.com/etcd-io/etcd/issues/9333 for more detail.
+ // TODO: whether to fast-forward the initial election ticks on boot to speed up the election.
+ InitialElectionTickAdvance bool `json:"initial-election-tick-advance"` // whether to advance the election ticks at startup for a faster first election
+
+ BoltBackendBatchInterval time.Duration `json:"backend-batch-interval"` // BackendBatchInterval is the maximum time before committing a backend transaction
+ BoltBackendBatchLimit int `json:"backend-batch-limit"` // BackendBatchLimit is the maximum number of operations before committing a backend transaction
+ BackendFreelistType string `json:"backend-bbolt-freelist-type"` // BackendFreelistType specifies the freelist type used by the boltdb backend (array and map are supported).
+ QuotaBackendBytes int64 `json:"quota-backend-bytes"` // raise an alarm when the backend size exceeds the given quota (0 defaults to the low space quota).
+ MaxTxnOps uint `json:"max-txn-ops"` // maximum number of operations permitted in a transaction.
+ MaxRequestBytes uint `json:"max-request-bytes"` // maximum client request size, in bytes, the server will accept.
+
+ LPUrls []url.URL // URLs to listen on for traffic from other etcd members (peer communication)
+ LCUrls []url.URL // URLs the etcd server itself listens on for clients, i.e. which local interface and port to bind
+
+ APUrls []url.URL // URLs advertised to peers (other etcd servers) for reaching this member
+ ACUrls []url.URL // URLs advertised to clients (etcdctl/curl etc.) for reaching this member
+
+ ClientTLSInfo transport.TLSInfo // client certificate info for traffic from clients such as etcdctl
+ ClientAutoTLS bool
+
+ PeerTLSInfo transport.TLSInfo
+ PeerAutoTLS bool // peers communicate using generated certificates; default false
+ // SelfSignedCertValidity is the validity period, in years, of the client and peer certificates that etcd generates automatically when ClientAutoTLS and PeerAutoTLS are specified.
+ SelfSignedCertValidity uint `json:"self-signed-cert-validity"`
+
+ // CipherSuites is a list of supported TLS cipher suites between
+ // client/etcd and peers. If empty, Go auto-populates the list.
+ // Note that cipher suites are prioritized in the given order.
+ CipherSuites []string `json:"cipher-suites"`
+
+ ClusterState string `json:"initial-cluster-state"`
+ DNSCluster string `json:"discovery-srv"` // DNS SRV domain used to bootstrap the cluster.
+ DNSClusterServiceName string `json:"discovery-srv-name"` // suffix of the DNS SRV name queried when bootstrapping with DNS.
+ Dproxy string `json:"discovery-proxy"` // HTTP proxy for traffic to the discovery service
+ Durl string `json:"discovery"` // discovery URL used to bootstrap the cluster.
+ InitialCluster string `json:"initial-cluster"` // information about all members of the cluster. default=http://localhost:2380
+ InitialClusterToken string `json:"initial-cluster-token"` // lets a cluster be recreated: even with an identical configuration, a new cluster and member UUIDs are generated; otherwise conflicts between clusters would cause unpredictable errors.
+ StrictReconfigCheck bool `json:"strict-reconfig-check"` // strict checking of cluster reconfiguration requests
+
+ EnableV2 bool `json:"enable-v2"`
+ // AutoCompactionMode is the retention mode: periodic (time-based) or revision
+ AutoCompactionMode string `json:"auto-compaction-mode"`
+
+ //--auto-compaction-mode=revision --auto-compaction-retention=1000 compacts to "latest revision" - 1000 every 5 minutes;
+ //--auto-compaction-mode=periodic --auto-compaction-retention=12h compacts every hour and retains a 12-hour window.
+
+ AutoCompactionRetention string `json:"auto-compaction-retention"`
+
+ // GRPCKeepAliveMinTime is the minimum interval a client should wait before pinging the server.
+ GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
+
+ // GRPCKeepAliveInterval is the frequency of server-to-client pings used to check whether a connection is alive (0 to disable).
+ GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
+ // GRPCKeepAliveTimeout is the additional wait time before closing a non-responsive connection (0 to disable); default 20s
+ GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
+
+ // SocketOpts are socket options passed to listener config.
+ SocketOpts transport.SocketOpts
+
+ // PreVote, when true, enables Raft Pre-Vote. If enabled, Raft runs an extra election phase
+ // to check whether it would win enough votes, minimizing disruption.
+ PreVote bool `json:"pre-vote"` // default false
+
+ CORS map[string]struct{}
+
+ // Lists the acceptable hostnames for HTTP client requests. A client origin policy protects an insecure etcd server
+ // against "DNS rebinding" attacks: any website can simply create an authorized DNS name pointing to "localhost"
+ // (or any other address), after which every etcd HTTP endpoint listening on "localhost" becomes reachable and vulnerable.
+ HostWhitelist map[string]struct{}
+
+ // UserHandlers is for registering users' handlers and is only used when embedding etcd into other applications.
+ // The map key is the path of the handler; you must make sure it does not conflict with etcd's own paths.
+ UserHandlers map[string]http.Handler `json:"-"`
+ // ServiceRegister is for registering users' gRPC services. A simple usage example:
+ // cfg := embed.NewConfig()
+ // cfg.ServerRegister = func(s *grpc.Server) {
+ // pb.RegisterFooServer(s, &fooServer{})
+ // pb.RegisterBarServer(s, &barServer{})
+ // }
+ // embed.StartEtcd(cfg)
+ ServiceRegister func(*grpc.Server) `json:"-"`
+
+ AuthToken string `json:"auth-token"` // auth token type: simple or jwt
+ BcryptCost uint `json:"bcrypt-cost"` // bcrypt cost/strength for hashing auth passwords. Valid values are between 4 and 31. Default: 10
+
+ AuthTokenTTL uint `json:"auth-token-ttl"` // token time-to-live
+
+ ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"` // data corruption detection
+ ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"` // data corruption detection
+ // ExperimentalEnableV2V3 configures URLs that expose deprecated V2 API working on V3 store.
+ // Deprecated in v3.5.
+ // TODO: Delete in v3.6 (https://github.com/etcd-io/etcd/issues/12913)
+ ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"`
+ ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` // allows the leader to periodically send lease checkpoints to other members, so remaining TTLs are not reset when the leader changes.
+ // ExperimentalEnableLeaseCheckpointPersist
+ // enables persisting the remaining TTL to prevent indefinite auto-renewal of long-lived leases. Always enabled in v3.6. Should be used to ensure a smooth upgrade from v3.5 clusters with this feature enabled.
+ // Requires experimental-enable-lease-checkpoint to be enabled.
+ // Deprecated in v3.6.
+ // TODO: Delete in v3.7
+ ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"`
+ ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
+ ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"`
+ // ExperimentalWarningApplyDuration is the time threshold; if applying a request takes longer than this, a warning is emitted.
+ ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"`
+ // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd to
+ // consider running defrag during bootstrap. Needs to be set to a non-zero value to take effect.
+ ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"`
+
+ // ForceNewCluster starts a new cluster even if previously started; unsafe.
+ ForceNewCluster bool `json:"force-new-cluster"`
+
+ EnablePprof bool `json:"enable-pprof"`
+ Metrics string `json:"metrics"` // basic ;extensive
+ ListenMetricsUrls []url.URL
+ ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
+
+ // ExperimentalEnableDistributedTracing indicates whether experimental tracing using OpenTelemetry is enabled.
+ ExperimentalEnableDistributedTracing bool `json:"experimental-enable-distributed-tracing"`
+ // ExperimentalDistributedTracingAddress is the address of the OpenTelemetry Collector.
+ // Can only be set if ExperimentalEnableDistributedTracing is true.
+ ExperimentalDistributedTracingAddress string `json:"experimental-distributed-tracing-address"`
+ // ExperimentalDistributedTracingServiceName is the name of the service.
+ // Can only be used if ExperimentalEnableDistributedTracing is true.
+ ExperimentalDistributedTracingServiceName string `json:"experimental-distributed-tracing-service-name"`
+ // ExperimentalDistributedTracingServiceInstanceID is the ID key of the service.
+ // This ID must be unique, as it helps to distinguish instances of the same service
+ // that exist at the same time.
+ // Can only be used if ExperimentalEnableDistributedTracing is true.
+ ExperimentalDistributedTracingServiceInstanceID string `json:"experimental-distributed-tracing-instance-id"`
+
+ // Logger selects which logger implementation to use
+ Logger string `json:"logger"`
+ // LogLevel is the log level: debug, info, warn, error, panic, or fatal. Default 'info'.
+ LogLevel string `json:"log-level"`
+ // LogOutputs is either:
+ // - "default" as os.Stderr,
+ // - "stderr" as os.Stderr,
+ // - "stdout" as os.Stdout,
+ // - file path to append etcd logs to.
+ // Multiple outputs may be given when the logger is zap.
+ LogOutputs []string `json:"log-outputs"`
+ // EnableLogRotation enables log rotation for a single log output file target.
+ EnableLogRotation bool `json:"enable-log-rotation"`
+ // LogRotationConfigJSON is a passthrough allowing a log rotation JSON config to be passed directly.
+ LogRotationConfigJSON string `json:"log-rotation-config-json"`
+ // ZapLoggerBuilder is used to build your own zap logger
+ ZapLoggerBuilder func(*Config) error
+
+ // logger logs etcd-side operations. The default is nil,
+	// and "setupLogging" must be called before starting etcd.
+ // Do not set logger directly.
+ loggerMu *sync.RWMutex
+ logger *zap.Logger
+ // EnableGRPCGateway 启用grpc网关,将 http 转换成 grpc / true
+ EnableGRPCGateway bool `json:"enable-grpc-gateway"`
+
+	// UnsafeNoFsync disables all uses of fsync. Setting this is unsafe and may cause data loss.
+	UnsafeNoFsync bool `json:"unsafe-no-fsync"` // default false
+	// ExperimentalDowngradeCheckTime is the interval between two downgrade status checks.
+ ExperimentalDowngradeCheckTime time.Duration `json:"experimental-downgrade-check-time"`
+
+	// ExperimentalMemoryMlock enables locking of the memory pages owned by etcd. The setting improves etcd tail latency in environments where:
+	// - memory pressure might cause pages to be swapped to disk
+	// - disk latency might be unstable
+	// Currently all etcd memory is mlocked, but in the future this flag may be refined to lock only the areas used by bbolt.
+ ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"`
+
+ // ExperimentalTxnModeWriteWithSharedBuffer 使得写事务在其只读检查操作中使用一个共享缓冲区.
+ ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"`
+
+ // V2Deprecation describes phase of API & Storage V2 support
+ V2Deprecation config.V2DeprecationEnum `json:"v2-deprecation"`
+}
+
+// configYAML holds the config suitable for yaml parsing
+type configYAML struct {
+ Config
+ configJSON
+}
+
+// configJSON holds file-only options that are translated into Config options.
+type configJSON struct {
+	LPUrlsJSON string `json:"listen-peer-urls"` // URLs the member listens on for peer traffic; if the IP is 0.0.0.0, etcd listens on the given port on every interface
+ LCUrlsJSON string `json:"listen-client-urls"`
+ APUrlsJSON string `json:"initial-advertise-peer-urls"`
+ ACUrlsJSON string `json:"advertise-client-urls"`
+
+ CORSJSON string `json:"cors"`
+ HostWhitelistJSON string `json:"host-whitelist"`
+
+ ClientSecurityJSON securityConfig `json:"client-transport-security"`
+ PeerSecurityJSON securityConfig `json:"peer-transport-security"`
+}
+
+type securityConfig struct {
+ CertFile string `json:"cert-file"`
+ KeyFile string `json:"key-file"`
+ ClientCertFile string `json:"client-cert-file"`
+ ClientKeyFile string `json:"client-key-file"`
+ CertAuth bool `json:"client-cert-auth"`
+ TrustedCAFile string `json:"trusted-ca-file"`
+ AutoTLS bool `json:"auto-tls"`
+}
+
+// NewConfig creates a new Config populated with default values.
+func NewConfig() *Config {
+ lpurl, _ := url.Parse(DefaultListenPeerURLs) // "http://localhost:2380"
+ apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs) // "http://localhost:2380"
+ lcurl, _ := url.Parse(DefaultListenClientURLs) // "http://localhost:2379"
+ acurl, _ := url.Parse(DefaultAdvertiseClientURLs) // "http://localhost:2379"
+ cfg := &Config{
+ MaxSnapFiles: DefaultMaxSnapshots, // 最大快照数
+ MaxWalFiles: DefaultMaxWALs, // wal文件的最大保留数量(0不受限制).
+
+ Name: DefaultName, // 节点的名字
+
+ SnapshotCount: etcdserver.DefaultSnapshotCount, // 快照数量
+ SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries, // 触发快照到磁盘的已提交事务数.
+
+ MaxTxnOps: DefaultMaxTxnOps, // 事务中允许的最大操作数. 128
+ MaxRequestBytes: DefaultMaxRequestBytes, // 最大请求体, 1.5M
+ ExperimentalWarningApplyDuration: DefaultWarningApplyDuration, // 是时间长度.如果应用请求的时间超过这个值.就会产生一个警告. 100ms
+
+ GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, // 客户端在ping服务器之前应等待的最短持续时间间隔. 5s
+ GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, // 服务器到客户端ping的探活周期.以检查连接是否处于活动状态(0表示禁用).2h
+ GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, // 关闭非响应连接之前的额外持续等待时间(0表示禁用).20s
+
+ SocketOpts: transport.SocketOpts{}, // 套接字配置
+
+ TickMs: 100, // 心跳间隔100ms
+ ElectionMs: 1000, // 选举超时 1s
+ InitialElectionTickAdvance: true,
+
+		LPUrls: []url.URL{*lpurl}, // "http://localhost:2380"
+		LCUrls: []url.URL{*lcurl}, // "http://localhost:2379"
+		APUrls: []url.URL{*apurl}, // "http://localhost:2380"
+		ACUrls: []url.URL{*acurl}, // "http://localhost:2379"
+
+ // 设置new为初始静态或DNS引导期间出现的所有成员.如果将此选项设置为existing.则etcd将尝试加入现有群集.
+ ClusterState: ClusterStateFlagNew, // 状态标志、默认new
+ InitialClusterToken: "etcd-cluster",
+ StrictReconfigCheck: DefaultStrictReconfigCheck, // 拒绝可能导致仲裁丢失的重新配置请求
+ Metrics: "basic", // 基本的
+
+ CORS: map[string]struct{}{"*": {}}, // 跨域请求
+ HostWhitelist: map[string]struct{}{"*": {}}, // 主机白名单
+
+ AuthToken: "simple", // 认证格式 simple、jwt
+ BcryptCost: uint(bcrypt.DefaultCost), // 为散列身份验证密码指定bcrypt算法的成本/强度
+ AuthTokenTTL: 300, // token 有效期
+
+ PreVote: true, // Raft会运行一个额外的选举阶段.以检查它是否会获得足够的票数来赢得选举.从而最大限度地减少干扰.
+
+ loggerMu: new(sync.RWMutex),
+ logger: nil,
+ Logger: "zap",
+ LogOutputs: []string{DefaultLogOutput}, // os.Stderr
+ LogLevel: logutil.DefaultLogLevel, // info
+ EnableLogRotation: false, // 默认不允许日志旋转
+ LogRotationConfigJSON: DefaultLogRotationConfig, // 是用于日志轮换的默认配置. 默认情况下,日志轮换是禁用的.
+ EnableGRPCGateway: true, // 将http->grpc
+ // 实验性
+ ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime, // 两次降级状态检查之间的时间间隔.
+ ExperimentalMemoryMlock: false, // 内存页锁定
+ ExperimentalTxnModeWriteWithSharedBuffer: true, // 启用写事务在其只读检查操作中使用共享缓冲区.
+
+ V2Deprecation: config.V2_DEPR_DEFAULT, // not-yet
+ }
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ return cfg
+}
+
+// ConfigFromFile loads a Config from the YAML file at the given path.
+func ConfigFromFile(path string) (*Config, error) {
+ cfg := &configYAML{Config: *NewConfig()}
+ if err := cfg.configFromFile(path); err != nil { // ✅
+ return nil, err
+ }
+ return &cfg.Config, nil
+}
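For illustration only (the file path and URLs below are placeholders, not part of this repository), loading such a YAML file via ConfigFromFile might look like the following; the YAML keys mirror the json tags declared on Config/configJSON:

    package main

    import (
    	"log"

    	"github.com/ls-2018/etcd_cn/etcd/embed"
    )

    func main() {
    	// /etc/etcd/etcd.conf.yml is a placeholder path; a minimal file could contain:
    	//   listen-peer-urls: "http://10.0.0.1:2380"
    	//   listen-client-urls: "http://10.0.0.1:2379"
    	//   initial-advertise-peer-urls: "http://10.0.0.1:2380"
    	//   advertise-client-urls: "http://10.0.0.1:2379"
    	cfg, err := embed.ConfigFromFile("/etc/etcd/etcd.conf.yml")
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("name=%s initial-cluster=%s", cfg.Name, cfg.InitialCluster)
    }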
+
+// configFromFile parses the YAML file at path into cfg and validates the result.
+func (cfg *configYAML) configFromFile(path string) error {
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ defaultInitialCluster := cfg.InitialCluster
+
+ err = yaml.Unmarshal(b, cfg)
+ if err != nil {
+ return err
+ }
+
+ if cfg.LPUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "设置时出现意外错误 listen-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.LPUrls = []url.URL(u)
+ }
+
+ if cfg.LCUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "设置时出现意外错误 listen-client-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.LCUrls = []url.URL(u)
+ }
+
+ if cfg.APUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "设置时出现意外错误 initial-advertise-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.APUrls = []url.URL(u)
+ }
+
+ if cfg.ACUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
+ if err != nil {
+			fmt.Fprintf(os.Stderr, "设置时出现意外错误 advertise-client-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.ACUrls = []url.URL(u)
+ }
+
+ if cfg.ListenMetricsUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "设置时出现意外错误 listen-metrics-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.ListenMetricsUrls = []url.URL(u)
+ }
+
+ if cfg.CORSJSON != "" {
+ uv := flags.NewUniqueURLsWithExceptions(cfg.CORSJSON, "*")
+ cfg.CORS = uv.Values
+ }
+
+ if cfg.HostWhitelistJSON != "" {
+ uv := flags.NewUniqueStringsValue(cfg.HostWhitelistJSON)
+ cfg.HostWhitelist = uv.Values
+ }
+
+	// If the discovery flag is set, clear the default initial cluster set by InitialClusterFromName.
+ if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = ""
+ }
+ if cfg.ClusterState == "" {
+ cfg.ClusterState = ClusterStateFlagNew
+ }
+
+ copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
+ tls.CertFile = ysc.CertFile
+ tls.KeyFile = ysc.KeyFile
+ tls.ClientCertFile = ysc.ClientCertFile
+ tls.ClientKeyFile = ysc.ClientKeyFile
+ tls.ClientCertAuth = ysc.CertAuth
+ tls.TrustedCAFile = ysc.TrustedCAFile
+ }
+ copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
+ copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
+ cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
+ cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
+ if cfg.SelfSignedCertValidity == 0 {
+ cfg.SelfSignedCertValidity = 1
+ }
+ return cfg.Validate() // ✅
+}
+
+// updateCipherSuites sets the TLS cipher suites from the given names; it is an error if they have already been specified.
+func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
+ if len(tls.CipherSuites) > 0 && len(ss) > 0 {
+ return fmt.Errorf("TLSInfo.CipherSuites已经指定(given %v)", ss)
+ }
+ if len(ss) > 0 {
+ cs := make([]uint16, len(ss))
+ for i, s := range ss {
+ var ok bool
+ cs[i], ok = tlsutil.GetCipherSuite(s)
+ if !ok {
+ return fmt.Errorf("unexpected TLS cipher suite %q", s)
+ }
+ }
+ tls.CipherSuites = cs
+ }
+ return nil
+}
+
+// Validate ensures that '*embed.Config' fields are properly configured.
+func (cfg *Config) Validate() error {
+ if err := cfg.setupLogging(); err != nil { // ✅
+ return err
+ }
+ if err := checkBindURLs(cfg.LPUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.LCUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
+ return err
+ }
+ if err := checkHostURLs(cfg.APUrls); err != nil {
+ addrs := cfg.getAPURLs()
+ return fmt.Errorf(`--initial-advertise-peer-urls %q 必须是 "host:port" (%v)`, strings.Join(addrs, ","), err)
+ }
+ if err := checkHostURLs(cfg.ACUrls); err != nil {
+ addrs := cfg.getACURLs()
+ return fmt.Errorf(`--advertise-client-urls %q 必须是 "host:port" (%v)`, strings.Join(addrs, ","), err)
+ }
+	// Check if conflicting bootstrap flags are passed in.
+ nSet := 0
+ for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} {
+ if v {
+ nSet++
+ }
+ }
+
+ if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
+ return fmt.Errorf("意料之外的集群状态 %q", cfg.ClusterState)
+ }
+
+ if nSet > 1 {
+ return ErrConflictBootstrapFlags
+ }
+
+ if cfg.TickMs == 0 {
+ return fmt.Errorf("--heartbeat-interval必须是>0 (set to %dms)", cfg.TickMs)
+ }
+ if cfg.ElectionMs == 0 {
+ return fmt.Errorf("--election-timeout必须是>0 (set to %dms)", cfg.ElectionMs)
+ }
+ if 5*cfg.TickMs > cfg.ElectionMs {
+ return fmt.Errorf("--election-timeout[%vms] 必须是5倍 --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
+ }
+ if cfg.ElectionMs > maxElectionMs {
+ return fmt.Errorf("--election-timeout[%vms] 时间太长,应该小于 %vms", cfg.ElectionMs, maxElectionMs)
+ }
+
+	// Check this last, since proxies in etcdmain may make this configuration valid.
+ if cfg.LCUrls != nil && cfg.ACUrls == nil {
+ return ErrUnsetAdvertiseClientURLsFlag
+ }
+
+ switch cfg.AutoCompactionMode {
+ case "":
+ case CompactorModeRevision, CompactorModePeriodic:
+ default:
+ return fmt.Errorf("未知的 auto-compaction-mode %q", cfg.AutoCompactionMode)
+ }
+	// not taken when both flags are false
+	if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint {
+		cfg.logger.Warn("detected checkpoints enabled without persistence; consider enabling experimental-enable-lease-checkpoint-persist")
+	}
+	if !cfg.ExperimentalEnableLeaseCheckpoint && !cfg.ExperimentalEnableLeaseCheckpointPersist {
+		// the default path: both flags are false
+		return nil
+	} else if cfg.ExperimentalEnableLeaseCheckpoint && cfg.ExperimentalEnableLeaseCheckpointPersist {
+		return nil
+	}
+	return fmt.Errorf("setting experimental-enable-lease-checkpoint-persist requires experimental-enable-lease-checkpoint to be enabled as well")
+}
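As a standalone sketch (not part of the source), the heartbeat/election checks above reduce to simple arithmetic over TickMs and ElectionMs; maxElectionMs is assumed here to be 50000ms, as in upstream etcd:

    package main

    import "fmt"

    // checkTimings mirrors the timing constraints Validate enforces (sketch only).
    func checkTimings(tickMs, electionMs, maxElectionMs uint) error {
    	if tickMs == 0 || electionMs == 0 {
    		return fmt.Errorf("heartbeat-interval and election-timeout must be > 0")
    	}
    	if 5*tickMs > electionMs {
    		return fmt.Errorf("election-timeout %dms should be at least 5x heartbeat-interval %dms", electionMs, tickMs)
    	}
    	if electionMs > maxElectionMs {
    		return fmt.Errorf("election-timeout %dms is too long; it should be less than %dms", electionMs, maxElectionMs)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkTimings(100, 1000, 50000)) // defaults from NewConfig: <nil>
    	fmt.Println(checkTimings(500, 1000, 50000)) // rejected: election-timeout smaller than 5x heartbeat-interval
    }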
+
+// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
+func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
+ token = cfg.InitialClusterToken
+ switch {
+ // todo 以下手动注释掉,一般不会使用以下的
+ //case cfg.Durl != "": // 用于引导群集的发现URL
+ // urlsmap = types.URLsMap{}
+ // // 如果使用discovery,根据advertised peer URLs 生成一个临时的集群
+ // urlsmap[cfg.Name] = cfg.APUrls
+ // token = cfg.Durl
+ //
+ //case cfg.DNSCluster != "": // DNS srv域用于引导群集.
+ // clusterStrs, cerr := cfg.GetDNSClusterNames()
+ // lg := cfg.logger
+ // if cerr != nil {
+ // lg.Warn("如法解析 SRV discovery", zap.Error(cerr))
+ // }
+ // if len(clusterStrs) == 0 {
+ // return nil, "", cerr
+ // }
+ // for _, s := range clusterStrs {
+ // lg.Info("got bootstrap from DNS for etcd-etcd", zap.String("node", s))
+ // }
+ // clusterStr := strings.Join(clusterStrs, ",")
+ // if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" {
+ // cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
+ // }
+ // urlsmap, err = types.NewURLsMap(clusterStr)
+ // // only etcd member must belong to the discovered cluster.
+ // // proxy does not need to belong to the discovered cluster.
+ // if which == "etcd" {
+ // if _, ok := urlsmap[cfg.Name]; !ok {
+ // return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
+ // }
+ // }
+
+ default:
+		// We are statically configured, e.g.
+		// infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380
+		urlsmap, err = types.NewURLsMap(cfg.InitialCluster) // essentially just a parse/type conversion
+ }
+ return urlsmap, token, err
+}
+
+// GetDNSClusterNames uses DNS SRV records to get the initial node list for cluster bootstrap. It returns a list of one or more nodes, along with any errors encountered while performing service discovery.
+// Note: Because this checks multiple sets of SRV records, discovery should only be considered to have
+// failed if the returned node list is empty.
+func (cfg *Config) GetDNSClusterNames() ([]string, error) {
+ var (
+ clusterStrs []string
+ cerr error
+ serviceNameSuffix string
+ )
+ if cfg.DNSClusterServiceName != "" {
+ serviceNameSuffix = "-" + cfg.DNSClusterServiceName
+ }
+
+ lg := cfg.GetLogger()
+
+ // Use both etcd-etcd-ssl and etcd-etcd for discovery.
+ // Combine the results if both are available.
+ clusterStrs, cerr = getCluster("https", "etcd-etcd-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
+ if cerr != nil {
+ clusterStrs = make([]string, 0)
+ }
+ lg.Info(
+ "get cluster for etcd-etcd-ssl SRV",
+ zap.String("service-scheme", "https"),
+ zap.String("service-name", "etcd-etcd-ssl"+serviceNameSuffix),
+ zap.String("etcd-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(cerr),
+ )
+
+ defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-etcd"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
+ if httpCerr == nil {
+ clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
+ }
+ lg.Info(
+ "get cluster for etcd-etcd SRV",
+ zap.String("service-scheme", "http"),
+ zap.String("service-name", "etcd-etcd"+serviceNameSuffix),
+ zap.String("etcd-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(httpCerr),
+ )
+
+ return clusterStrs, multierr.Combine(cerr, httpCerr)
+}
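getCluster itself is defined elsewhere in the package, but SRV discovery of this kind conventionally resolves records of the form _<service>._tcp.<discovery-srv>; a hedged sketch using only the standard library (the domain and the exact record names are assumptions derived from the service names above):

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	// Assumed record name, derived from the "etcd-etcd-ssl" service name used above;
    	// net.LookupSRV queries _etcd-etcd-ssl._tcp.example.com.
    	_, addrs, err := net.LookupSRV("etcd-etcd-ssl", "tcp", "example.com")
    	if err != nil {
    		fmt.Println("srv lookup failed:", err)
    		return
    	}
    	for _, srv := range addrs {
    		fmt.Printf("peer %s:%d\n", srv.Target, srv.Port)
    	}
    }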
+
+// InitialClusterFromName builds the initial cluster member list from the advertise peer URLs, e.g. default=http://localhost:2380
+func (cfg Config) InitialClusterFromName(name string) (ret string) {
+ if len(cfg.APUrls) == 0 {
+ return ""
+ }
+ n := name
+ if name == "" {
+ n = DefaultName
+ }
+ for i := range cfg.APUrls {
+ ret = ret + "," + n + "=" + cfg.APUrls[i].String()
+ }
+ return ret[1:]
+}
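A small usage sketch of the string this builds, using the defaults from NewConfig (the member name "infra1" is just an example):

    package main

    import (
    	"fmt"

    	"github.com/ls-2018/etcd_cn/etcd/embed"
    )

    func main() {
    	cfg := embed.NewConfig()
    	fmt.Println(cfg.InitialCluster) // default=http://localhost:2380

    	// Changing only the name rebuilds the entry from the (unchanged) advertise peer URLs.
    	cfg.Name = "infra1"
    	cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
    	fmt.Println(cfg.InitialCluster) // infra1=http://localhost:2380
    }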
+
+func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
+
+// ElectionTicks returns the number of ticks that make up one election timeout.
+func (cfg Config) ElectionTicks() int {
+ return int(cfg.ElectionMs / cfg.TickMs)
+}
+
+func (cfg Config) V2DeprecationEffective() config.V2DeprecationEnum {
+ if cfg.V2Deprecation == "" {
+ return config.V2_DEPR_DEFAULT
+ }
+ return cfg.V2Deprecation
+}
+
+func (cfg Config) defaultPeerHost() bool {
+ return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
+}
+
+func (cfg Config) defaultClientHost() bool {
+ return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
+}
+
+// ClientSelfCert generates a self-signed certificate for the client listeners (LCUrls) when client auto TLS is enabled.
+func (cfg *Config) ClientSelfCert() (err error) {
+ if !cfg.ClientAutoTLS {
+ return nil
+ }
+ if !cfg.ClientTLSInfo.Empty() {
+ cfg.logger.Warn("忽略客户端自动TLS,因为已经给出了证书")
+ return nil
+ }
+ chosts := make([]string, len(cfg.LCUrls))
+ for i, u := range cfg.LCUrls {
+ chosts[i] = u.Host
+ }
+ cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity)
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
+}
+
+// PeerSelfCert generates a self-signed certificate for the peer listeners (LPUrls) when peer auto TLS is enabled.
+func (cfg *Config) PeerSelfCert() (err error) {
+ if !cfg.PeerAutoTLS {
+ return nil
+ }
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Warn("如果证书给出 则忽略peer自动TLS")
+ return nil
+ }
+ phosts := make([]string, len(cfg.LPUrls))
+ for i, u := range cfg.LPUrls {
+ phosts[i] = u.Host
+ }
+ cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity) // ?年
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
+}
+
+// UpdateDefaultClusterFromName updates the cluster advertise URLs and 'initial-cluster' using the machine's default host when needed.
+func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
+	// default=http://localhost:2380
+	if defaultHostname == "" || defaultHostStatus != nil {
+		// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+ return "", defaultHostStatus
+ }
+
+ used := false
+ pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
+ if cfg.defaultPeerHost() && pip == "0.0.0.0" {
+ cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
+ used = true
+ }
+ // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+
+ cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
+ if cfg.defaultClientHost() && cip == "0.0.0.0" {
+ cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
+ used = true
+ }
+ dhost := defaultHostname
+ if !used {
+ dhost = ""
+ }
+ return dhost, defaultHostStatus
+}
+
+// checkBindURLs returns an error if any URL uses a domain name for binding; only IP addresses, localhost, and unix sockets are allowed.
+func checkBindURLs(urls []url.URL) error {
+ for _, url := range urls {
+ if url.Scheme == "unix" || url.Scheme == "unixs" {
+ continue
+ }
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "localhost" {
+ // special case for local address
+ // TODO: support /etc/hosts ?
+ continue
+ }
+ if net.ParseIP(host) == nil {
+ return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
+ }
+ }
+ return nil
+}
+
+func checkHostURLs(urls []url.URL) error {
+ for _, url := range urls {
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "" {
+ return fmt.Errorf("unexpected empty host (%s)", url.String())
+ }
+ }
+ return nil
+}
+
+func (cfg *Config) getAPURLs() (ss []string) {
+ ss = make([]string, len(cfg.APUrls))
+ for i := range cfg.APUrls {
+ ss[i] = cfg.APUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getLPURLs() (ss []string) {
+ ss = make([]string, len(cfg.LPUrls))
+ for i := range cfg.LPUrls {
+ ss[i] = cfg.LPUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getACURLs() (ss []string) {
+ ss = make([]string, len(cfg.ACUrls))
+ for i := range cfg.ACUrls {
+ ss[i] = cfg.ACUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getLCURLs() (ss []string) {
+ ss = make([]string, len(cfg.LCUrls))
+ for i := range cfg.LCUrls {
+ ss[i] = cfg.LCUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getMetricsURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenMetricsUrls))
+ for i := range cfg.ListenMetricsUrls {
+ ss[i] = cfg.ListenMetricsUrls[i].String()
+ }
+ return ss
+}
+
+// parseBackendFreelistType returns the freelist type used by the boltdb backend.
+func parseBackendFreelistType(freelistType string) bolt.FreelistType {
+ if freelistType == freelistArrayType {
+ return bolt.FreelistArrayType
+ }
+
+ return bolt.FreelistMapType
+}
diff --git a/etcd/embed/config_logging.go b/etcd/embed/config_logging.go
new file mode 100644
index 00000000000..157dc31e890
--- /dev/null
+++ b/etcd/embed/config_logging.go
@@ -0,0 +1,252 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zapgrpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+ "gopkg.in/natefinch/lumberjack.v2"
+)
+
+// GetLogger
+// err := cfg.ZapLoggerBuilder(cfg)
+func (cfg Config) GetLogger() *zap.Logger {
+ cfg.loggerMu.RLock()
+ l := cfg.logger
+ cfg.loggerMu.RUnlock()
+ return l
+}
+
+// setupLogging initializes etcd logging. It must be called after flag parsing, or after finishing to configure embed.Config.
+func (cfg *Config) setupLogging() error {
+ switch cfg.Logger {
+ case "zap":
+ if len(cfg.LogOutputs) == 0 {
+ cfg.LogOutputs = []string{DefaultLogOutput}
+ }
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v == DefaultLogOutput {
+ return fmt.Errorf("目前还不支持%q的多重日志输出", DefaultLogOutput)
+ }
+ }
+ }
+ // todo
+ if cfg.EnableLogRotation {
+ if err := setupLogRotation(cfg.LogOutputs, cfg.LogRotationConfigJSON); err != nil {
+ return err
+ }
+ }
+
+ outputPaths, errOutputPaths := make([]string, 0), make([]string, 0)
+ isJournal := false
+ for _, v := range cfg.LogOutputs {
+ switch v {
+ case DefaultLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case JournalLogOutput:
+ isJournal = true
+
+ case StdErrLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case StdOutLogOutput:
+ outputPaths = append(outputPaths, StdOutLogOutput)
+ errOutputPaths = append(errOutputPaths, StdOutLogOutput)
+
+ default:
+ var path string
+ if cfg.EnableLogRotation {
+ // append rotate scheme to logs managed by lumberjack log rotation
+ if v[0:1] == "/" {
+ path = fmt.Sprintf("rotate:/%%2F%s", v[1:])
+ } else {
+ path = fmt.Sprintf("rotate:/%s", v)
+ }
+ } else {
+ path = v
+ }
+ outputPaths = append(outputPaths, path)
+ errOutputPaths = append(errOutputPaths, path)
+ }
+ }
+
+ if !isJournal {
+ copied := logutil.DefaultZapLoggerConfig
+ copied.OutputPaths = outputPaths
+ copied.ErrorOutputPaths = errOutputPaths
+ copied = logutil.MergeOutputPaths(copied) // /dev/null 判断
+ copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) // 是一个方便的函数,它创建一个AtomicLevel,然后用给定的级别调用SetLevel.
+ if cfg.ZapLoggerBuilder == nil {
+ lg, err := copied.Build() // 从配置和选项中构建一个logger
+ if err != nil {
+ return err
+ }
+ cfg.ZapLoggerBuilder = NewZapLoggerBuilder(lg)
+ }
+ } else {
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v != DefaultLogOutput {
+ return fmt.Errorf("运行systemd/journal,但其他 '--log-outputs' values (%q) 被配置为 'default'; 用其他的值重写 'default'", cfg.LogOutputs)
+ }
+ }
+ }
+
+ // 使用stderr作为后备方案
+ syncer, lerr := getJournalWriteSyncer()
+ if lerr != nil {
+ return lerr
+ }
+
+ lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+
+		// WARN: do not change the field names in the encoder config; the journald log writer assumes the field names "level" and "caller"
+ cr := zapcore.NewCore(
+ zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig),
+ syncer, lvl,
+ )
+ if cfg.ZapLoggerBuilder == nil {
+ cfg.ZapLoggerBuilder = NewZapLoggerBuilder(zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer)))
+ }
+ }
+
+ err := cfg.ZapLoggerBuilder(cfg)
+ if err != nil {
+ return err
+ }
+
+ logTLSHandshakeFailure := func(conn *tls.Conn, err error) {
+ // 记录tls握手失败
+ state := conn.ConnectionState()
+ remoteAddr := conn.RemoteAddr().String()
+ serverName := state.ServerName
+ if len(state.PeerCertificates) > 0 {
+ cert := state.PeerCertificates[0]
+ ips := make([]string, len(cert.IPAddresses))
+ for i := range cert.IPAddresses {
+ ips[i] = cert.IPAddresses[i].String()
+ }
+ cfg.logger.Warn(
+ "拒绝连接",
+ zap.String("remote-addr", remoteAddr),
+ zap.String("etcd-name", serverName),
+ zap.Strings("ip-addresses", ips),
+ zap.Strings("dns-names", cert.DNSNames),
+ zap.Error(err),
+ )
+ } else {
+ cfg.logger.Warn(
+ "拒绝连接",
+ zap.String("remote-addr", remoteAddr),
+ zap.String("etcd-name", serverName),
+ zap.Error(err),
+ )
+ }
+ }
+ cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+ cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+
+ default:
+ return fmt.Errorf("未知的Logger选项 %q", cfg.Logger)
+ }
+
+ return nil
+}
+
+// NewZapLoggerBuilder generates a zap logger builder that sets the given logger for embedded etcd.
+func NewZapLoggerBuilder(lg *zap.Logger) func(*Config) error {
+ return func(cfg *Config) error {
+ cfg.loggerMu.Lock()
+ defer cfg.loggerMu.Unlock()
+ cfg.logger = lg
+ return nil
+ }
+}
+
+// SetupGlobalLoggers configures the global loggers (grpc, zap global) based on cfg.
+// Since 3.5, embedded etcd does not run this by default, so that grpc/zap global logging can be configured independently or set up across independent lifecycles (such as tests).
+func (cfg *Config) SetupGlobalLoggers() {
+ lg := cfg.GetLogger()
+ if lg != nil {
+ if cfg.LogLevel == "debug" {
+ grpc.EnableTracing = true
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+ } else {
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
+ }
+ zap.ReplaceGlobals(lg)
+ }
+}
+
+type logRotationConfig struct {
+ *lumberjack.Logger
+}
+
+// Sync implements zap.Sink
+func (logRotationConfig) Sync() error { return nil }
+
+// setupLogRotation initializes log rotation for a single file-path target.
+func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
+ var logRotationConfig logRotationConfig
+ outputFilePaths := 0
+ for _, v := range logOutputs {
+ switch v {
+ case DefaultLogOutput, StdErrLogOutput, StdOutLogOutput:
+ continue
+ default:
+ outputFilePaths++
+ }
+ }
+ // 日志旋转需要文件目标
+ if len(logOutputs) == 1 && outputFilePaths == 0 {
+ return ErrLogRotationInvalidLogOutput
+ }
+ // support max 1 file target for log rotation
+ if outputFilePaths > 1 {
+ return ErrLogRotationInvalidLogOutput
+ }
+
+ if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationConfig); err != nil {
+ var unmarshalTypeError *json.UnmarshalTypeError
+ var syntaxError *json.SyntaxError
+ switch {
+ case errors.As(err, &syntaxError):
+ return fmt.Errorf("improperly formatted log rotation config: %w", err)
+ case errors.As(err, &unmarshalTypeError):
+ return fmt.Errorf("invalid log rotation config: %w", err)
+ }
+ }
+ zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) {
+ logRotationConfig.Filename = u.Path[1:]
+ return &logRotationConfig, nil
+ })
+ return nil
+}
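As a hedged illustration of how these pieces fit together, the sketch below enables rotation for a single file output; the JSON keys are gopkg.in/natefinch/lumberjack.v2 field tags, and the path and values are placeholders:

    package main

    import (
    	"log"

    	"github.com/ls-2018/etcd_cn/etcd/embed"
    )

    func main() {
    	cfg := embed.NewConfig()
    	cfg.Logger = "zap"
    	cfg.LogOutputs = []string{"/var/log/etcd.log"} // exactly one file target (placeholder path)
    	cfg.EnableLogRotation = true
    	// Example values only; the keys follow lumberjack.Logger's json tags.
    	cfg.LogRotationConfigJSON = `{"maxsize": 100, "maxage": 7, "maxbackups": 5, "compress": true}`

    	// Validate -> setupLogging -> setupLogRotation registers the "rotate" zap sink.
    	if err := cfg.Validate(); err != nil {
    		log.Fatal(err)
    	}
    }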
diff --git a/server/embed/config_logging_journal_unix.go b/etcd/embed/config_logging_journal_unix.go
similarity index 92%
rename from server/embed/config_logging_journal_unix.go
rename to etcd/embed/config_logging_journal_unix.go
index 75d83ff2b55..e9bd844329a 100644
--- a/server/embed/config_logging_journal_unix.go
+++ b/etcd/embed/config_logging_journal_unix.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !windows
+// +build !windows
package embed
@@ -20,7 +21,7 @@ import (
"fmt"
"os"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
"go.uber.org/zap/zapcore"
)
diff --git a/server/embed/config_logging_journal_windows.go b/etcd/embed/config_logging_journal_windows.go
similarity index 97%
rename from server/embed/config_logging_journal_windows.go
rename to etcd/embed/config_logging_journal_windows.go
index 90dfad944e4..58ed08631bb 100644
--- a/server/embed/config_logging_journal_windows.go
+++ b/etcd/embed/config_logging_journal_windows.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build windows
+// +build windows
package embed
diff --git a/etcd/embed/doc.go b/etcd/embed/doc.go
new file mode 100644
index 00000000000..735f8da00a7
--- /dev/null
+++ b/etcd/embed/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package embed provides bindings for embedding an etcd server in a program.
+
+Launch an embedded etcd server using the configuration defaults:
+
+ import (
+ "log"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/embed"
+ )
+
+ func main() {
+ cfg := embed.NewConfig()
+ cfg.Dir = "default.etcd"
+ e, err := embed.StartEtcd(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer e.Close()
+ select {
+ case <-e.Server.ReadyNotify():
+ log.Printf("Server is ready!")
+ case <-time.After(60 * time.Second):
+ e.Server.Stop() // trigger a shutdown
+ log.Printf("Server took too long to start!")
+ }
+ log.Fatal(<-e.Err())
+ }
+*/
+package embed
diff --git a/etcd/embed/etcd.go b/etcd/embed/etcd.go
new file mode 100644
index 00000000000..ee911216888
--- /dev/null
+++ b/etcd/embed/etcd.go
@@ -0,0 +1,848 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ defaultLog "log"
+ "net"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2v3"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3client"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/config"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc"
+ "github.com/ls-2018/etcd_cn/etcd/verify"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "github.com/ls-2018/etcd_cn/pkg/debugutil"
+ runtimeutil "github.com/ls-2018/etcd_cn/pkg/runtime"
+
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "github.com/soheilhy/cmux"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.opentelemetry.io/otel/exporters/otlp"
+ "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/semconv"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
+)
+
+const (
+ // internal fd usage includes disk usage and transport usage.
+ // To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs
+	// at most 2 to read/lock/write WALs. One case where it needs 2 is to
+	// read all logs after some snapshot index, which are located at the end of
+	// the second last and the head of the last. For purging, it needs to read
+ // directory, so it needs 1. For fd monitor, it needs 1.
+ // For transport, rafthttp builds two long-polling connections and at most
+ // four temporary connections with each member. There are at most 9 members
+ // in a cluster, so it should reserve 96.
+ // For the safety, we set the total reserved number to 150.
+ reservedInternalFDNum = 150
+)
+
+// Etcd contains a running etcd server and its listeners.
+type Etcd struct {
+ Peers []*peerListener
+ Clients []net.Listener
+ // 本机节点监听本地网卡的map 例如 localhost:2379 127.0.0.1:2379 0.0.0.0:2379 等等
+ sctxs map[string]*serveCtx
+ metricsListeners []net.Listener
+ tracingExporterShutdown func()
+ Server *etcdserver.EtcdServer
+ cfg Config
+ stopc chan struct{} // raft 停止,消息通道
+ errc chan error // 接收运行过程中产生的err
+ closeOnce sync.Once
+}
+
+// 每个server的Listener
+type peerListener struct {
+ net.Listener
+ serve func() error
+ close func(context.Context) error // 替换为net.Listener.Close()
+}
+
+// StartEtcd launches the etcd server and HTTP handlers for client/peer communication.
+// The returned Etcd.Server is not guaranteed to have joined the cluster yet; wait on the Etcd.Server.ReadyNotify() channel to know when it has completed and is ready for use.
+func StartEtcd(inCfg *Config) (e *Etcd, err error) {
+ if err = inCfg.Validate(); err != nil {
+ return nil, err
+ }
+ serving := false
+ e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
+ cfg := &e.cfg
+ defer func() {
+ if e == nil || err == nil {
+ return
+ }
+ if !serving {
+ // 在为serveCtx.servicesC启动gRPC etcd之前出现错误.
+ for _, sctx := range e.sctxs {
+ close(sctx.serversC)
+ }
+ }
+ e.Close() // 启动失败时, 优雅关闭
+ e = nil
+ }()
+
+ if !cfg.SocketOpts.Empty() {
+ cfg.logger.Info("配置socket选项", zap.Bool("reuse-address", cfg.SocketOpts.ReuseAddress), zap.Bool("reuse-port", cfg.SocketOpts.ReusePort))
+ }
+	e.cfg.logger.Info("configuring peer listeners", zap.Strings("listen-peer-urls", e.cfg.getLPURLs()))
+	// Set the timeout, certificates, and socket options for each peer listener.
+ if e.Peers, err = configurePeerListeners(cfg); err != nil {
+ return e, err
+ }
+
+	e.cfg.logger.Info("configuring client listeners", zap.Strings("listen-client-urls", e.cfg.getLCURLs()))
+	// Set the timeout, certificates, and socket options for each client listener.
+ if e.sctxs, err = configureClientListeners(cfg); err != nil {
+ return e, err
+ }
+
+ for _, sctx := range e.sctxs {
+ e.Clients = append(e.Clients, sctx.l)
+ }
+
+ var (
+ urlsmap types.URLsMap
+ token string
+ )
+ // 成员初始化
+ memberInitialized := true
+ if !isMemberInitialized(cfg) { // 判断wal目录存不存在
+ memberInitialized = false
+ urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd") // token {name:urls[]}
+ if err != nil {
+ return e, fmt.Errorf("设置初始化集群出错: %v", err)
+ }
+ }
+ // 自动压缩配置
+ if len(cfg.AutoCompactionRetention) == 0 { // 没有设置
+ cfg.AutoCompactionRetention = "0"
+ }
+ // 根据压缩类型、压缩配置 返回时间、或条数
+ autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
+ if err != nil {
+ return e, err
+ }
+ // 返回boltdb存储的数据类型,array \ map
+ backendFreelistType := parseBackendFreelistType(cfg.BackendFreelistType)
+
+ srvcfg := config.ServerConfig{
+ Name: cfg.Name,
+ ClientURLs: cfg.ACUrls,
+ PeerURLs: cfg.APUrls,
+ DataDir: cfg.Dir,
+ DedicatedWALDir: cfg.WalDir,
+ SnapshotCount: cfg.SnapshotCount, // 触发一次磁盘快照的提交事务的次数
+ SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries, // 快照追赶数据量
+ MaxSnapFiles: cfg.MaxSnapFiles,
+ MaxWALFiles: cfg.MaxWalFiles, // 要保留的最大wal文件数(0表示不受限制). 5
+ InitialPeerURLsMap: urlsmap, // 节点--> url
+ InitialClusterToken: token,
+ DiscoveryURL: cfg.Durl,
+ DiscoveryProxy: cfg.Dproxy,
+ NewCluster: cfg.IsNewCluster(), // new existing
+ PeerTLSInfo: cfg.PeerTLSInfo, // server 证书信息
+ TickMs: cfg.TickMs, // tick计时器触发间隔
+ ElectionTicks: cfg.ElectionTicks(), // 返回选举权检查对应多少次tick触发次数
+ InitialElectionTickAdvance: cfg.InitialElectionTickAdvance, // 是否提前初始化选举时钟启动,以便更快的选举
+ AutoCompactionRetention: autoCompactionRetention, // 自动压缩值
+ AutoCompactionMode: cfg.AutoCompactionMode, // 自动压缩模式
+ QuotaBackendBytes: cfg.QuotaBackendBytes, // 资源存储阈值
+ BackendBatchLimit: cfg.BoltBackendBatchLimit, // BackendBatchLimit是提交后端事务前的最大操作数
+ BackendFreelistType: backendFreelistType, // 返回boltdb存储的数据类型
+ BackendBatchInterval: cfg.BoltBackendBatchInterval, // BackendBatchInterval是提交后端事务前的最长时间.
+ MaxTxnOps: cfg.MaxTxnOps,
+ MaxRequestBytes: cfg.MaxRequestBytes, // 服务器将接受的最大客户端请求大小(字节).
+ SocketOpts: cfg.SocketOpts,
+ StrictReconfigCheck: cfg.StrictReconfigCheck, // 严格配置变更检查
+ ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
+ AuthToken: cfg.AuthToken, // 认证格式 simple、jwt
+ BcryptCost: cfg.BcryptCost, // 为散列身份验证密码指定bcrypt算法的成本/强度
+ TokenTTL: cfg.AuthTokenTTL,
+ CORS: cfg.CORS,
+ HostWhitelist: cfg.HostWhitelist,
+ InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck, // 数据毁坏检测功能
+ CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
+ PreVote: cfg.PreVote, // PreVote 是否启用PreVote
+ Logger: cfg.logger,
+ ForceNewCluster: cfg.ForceNewCluster,
+ EnableGRPCGateway: cfg.EnableGRPCGateway, // 启用grpc网关,将 http 转换成 grpc / true
+ ExperimentalEnableDistributedTracing: cfg.ExperimentalEnableDistributedTracing, // 默认false
+ UnsafeNoFsync: cfg.UnsafeNoFsync,
+ EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint, // 允许leader定期向其他成员发送检查点,以防止leader变化时剩余TTL重置.
+ LeaseCheckpointPersist: cfg.ExperimentalEnableLeaseCheckpointPersist,
+ CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
+ WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval,
+ DowngradeCheckTime: cfg.ExperimentalDowngradeCheckTime, // 两次降级状态检查之间的时间间隔.
+ WarningApplyDuration: cfg.ExperimentalWarningApplyDuration, // 是时间长度.如果应用请求的时间超过这个值.就会产生一个警告.
+ ExperimentalMemoryMlock: cfg.ExperimentalMemoryMlock,
+ ExperimentalTxnModeWriteWithSharedBuffer: cfg.ExperimentalTxnModeWriteWithSharedBuffer,
+ ExperimentalBootstrapDefragThresholdMegabytes: cfg.ExperimentalBootstrapDefragThresholdMegabytes,
+ }
+
+ if srvcfg.ExperimentalEnableDistributedTracing { // 使用OpenTelemetry协议实现分布式跟踪.默认false
+ tctx := context.Background()
+ tracingExporter, opts, err := e.setupTracing(tctx)
+ if err != nil {
+ return e, err
+ }
+ if tracingExporter == nil || len(opts) == 0 {
+ return e, fmt.Errorf("error setting up distributed tracing")
+ }
+ e.tracingExporterShutdown = func() { tracingExporter.Shutdown(tctx) }
+ srvcfg.ExperimentalTracerOptions = opts
+ }
+
+ print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
+
+ // TODO 在看
+ if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
+ return e, err
+ }
+
+ // buffer channel so goroutines on closed connections won't wait forever
+ e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
+
+ // newly started member ("memberInitialized==false")
+ // does not need corruption check
+ if memberInitialized {
+ if err = e.Server.CheckInitialHashKV(); err != nil {
+ // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
+ // (nothing to close since rafthttp transports have not been started)
+
+ e.cfg.logger.Error("checkInitialHashKV failed", zap.Error(err))
+ e.Server.Cleanup()
+ e.Server = nil
+ return e, err
+ }
+ }
+ e.Server.Start()
+
+ if err = e.servePeers(); err != nil {
+ return e, err
+ }
+ if err = e.serveClients(); err != nil {
+ return e, err
+ }
+ if err = e.serveMetrics(); err != nil { // ✅
+ return e, err
+ }
+
+ e.cfg.logger.Info(
+ "启动服务 peer/client/metrics",
+ zap.String("local-member-id", e.Server.ID().String()),
+ zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()),
+ zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), // 集群节点之间通信监听的URL;如果指定的IP是0.0.0.0,那么etcd 会监昕所有网卡的指定端口
+ zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
+ zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
+ zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
+ )
+ serving = true
+ return e, nil
+}
+
+func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized bool) {
+ cors := make([]string, 0, len(ec.CORS))
+ for v := range ec.CORS {
+ cors = append(cors, v)
+ }
+ sort.Strings(cors)
+
+ hss := make([]string, 0, len(ec.HostWhitelist))
+ for v := range ec.HostWhitelist {
+ hss = append(hss, v)
+ }
+ sort.Strings(hss)
+
+ quota := ec.QuotaBackendBytes
+ if quota == 0 {
+ quota = etcdserver.DefaultQuotaBytes
+ }
+
+ fmt.Println("------->",
+ zap.String("etcd-version", version.Version),
+ zap.String("git-sha", version.GitSHA),
+ zap.String("go-version", runtime.Version()),
+ zap.String("go-os", runtime.GOOS),
+ zap.String("go-arch", runtime.GOARCH),
+ zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)),
+ zap.Int("max-cpu-available", runtime.NumCPU()),
+ zap.Bool("member-initialized", memberInitialized),
+ zap.String("name", sc.Name),
+ zap.String("data-dir", sc.DataDir),
+ zap.String("wal-dir", ec.WalDir),
+ zap.String("wal-dir-dedicated", sc.DedicatedWALDir),
+ zap.String("member-dir", sc.MemberDir()),
+ zap.Bool("force-new-cluster", sc.ForceNewCluster),
+ zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)),
+ zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
+ zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance), // 是否提前初始化选举时钟启动,以便更快的选举
+ zap.Uint64("snapshot-count", sc.SnapshotCount), // 触发一次磁盘快照的提交事务的次数
+ zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
+ zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()),
+ zap.Strings("listen-peer-urls", ec.getLPURLs()), // 集群节点之间通信监听的URL;如果指定的IP是0.0.0.0,那么etcd 会监昕所有网卡的指定端口
+ zap.Strings("advertise-client-urls", ec.getACURLs()),
+ zap.Strings("listen-client-urls", ec.getLCURLs()),
+ zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
+ zap.Strings("cors", cors),
+ zap.Strings("host-whitelist", hss),
+ zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
+ zap.String("initial-cluster-state", ec.ClusterState),
+ zap.String("initial-cluster-token", sc.InitialClusterToken),
+ zap.Int64("quota-size-bytes", quota),
+ zap.Bool("pre-vote", sc.PreVote),
+ zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
+ zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
+ zap.String("auto-compaction-mode", sc.AutoCompactionMode),
+ zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention),
+ zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()),
+ zap.String("discovery-url", sc.DiscoveryURL),
+ zap.String("discovery-proxy", sc.DiscoveryProxy),
+ zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()),
+ )
+}
+
+// Config returns the current configuration.
+func (e *Etcd) Config() Config {
+ return e.cfg
+}
+
+// Close gracefully shuts down all servers and closes all connections.
+// Client requests are terminated after the request timeout, after which the connections are closed.
+func (e *Etcd) Close() {
+ fields := []zap.Field{
+ zap.String("name", e.cfg.Name),
+ zap.String("data-dir", e.cfg.Dir),
+ zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
+ }
+ lg := e.GetLogger()
+ lg.Info("关闭etcd ing...", fields...)
+ defer func() {
+ lg.Info("关闭etcd", fields...)
+ verify.MustVerifyIfEnabled(verify.Config{Logger: lg, DataDir: e.cfg.Dir, ExactIndex: false})
+ lg.Sync() // log都刷到磁盘
+ }()
+
+ e.closeOnce.Do(func() {
+ close(e.stopc)
+ })
+
+ // 使用请求超时关闭客户端请求
+ timeout := 2 * time.Second
+ if e.Server != nil {
+ timeout = e.Server.Cfg.ReqTimeout()
+ }
+ for _, sctx := range e.sctxs {
+ for ss := range sctx.serversC {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ stopServers(ctx, ss)
+ cancel()
+ }
+ }
+
+ for _, sctx := range e.sctxs {
+ sctx.cancel()
+ }
+
+ for i := range e.Clients {
+ if e.Clients[i] != nil {
+ e.Clients[i].Close()
+ }
+ }
+
+ for i := range e.metricsListeners {
+ e.metricsListeners[i].Close()
+ }
+
+ // shutdown tracing exporter
+ if e.tracingExporterShutdown != nil {
+ e.tracingExporterShutdown()
+ }
+
+ // 关闭 rafthttp transports
+ if e.Server != nil {
+ e.Server.Stop()
+ }
+
+ // close all idle connections in peer handler (wait up to 1-second)
+ for i := range e.Peers {
+ if e.Peers[i] != nil && e.Peers[i].close != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ e.Peers[i].close(ctx)
+ cancel()
+ }
+ }
+ if e.errc != nil {
+ close(e.errc)
+ }
+}
+
+func stopServers(ctx context.Context, ss *servers) {
+ // first, close the http.Server
+ ss.http.Shutdown(ctx)
+	// do not call grpc.Server.GracefulStop on a TLS-enabled etcd server
+ // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
+ // and https://github.com/etcd-io/etcd/issues/8916
+ if ss.secure {
+ ss.grpc.Stop()
+ return
+ }
+
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ // close listeners to stop accepting new connections,
+ // will block on any existing transports
+ ss.grpc.GracefulStop()
+ }()
+
+ // wait until all pending RPCs are finished
+ select {
+ case <-ch:
+ case <-ctx.Done():
+ // took too long, manually close open transports
+ // e.g. watch streams
+ ss.grpc.Stop()
+
+ // concurrent GracefulStop should be interrupted
+ <-ch
+ }
+}
+
+// Err returns the channel used to report errors during etcd run/shutdown.
+// Since etcd 3.5 the channel is closed once etcd has stopped.
+func (e *Etcd) Err() <-chan error {
+ return e.errc
+}
+
+// 配置 peer listeners
+func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
+ // 更新密码套件
+ if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+
+ if err = cfg.PeerSelfCert(); err != nil {
+ cfg.logger.Fatal("未能获得peer的自签名证书", zap.Error(err))
+ }
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Info(
+ "从peer的TLS开始",
+ zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)),
+ zap.Strings("cipher-suites", cfg.CipherSuites),
+ )
+ }
+
+ peers = make([]*peerListener, len(cfg.LPUrls))
+ defer func() {
+ if err == nil {
+ return
+ }
+ for i := range peers {
+ if peers[i] != nil && peers[i].close != nil {
+ cfg.logger.Warn(
+ "关闭节点listener",
+ zap.String("address", cfg.LPUrls[i].String()),
+ zap.Error(err),
+ )
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ peers[i].close(ctx)
+ cancel()
+ }
+ }
+ }()
+
+ for i, u := range cfg.LPUrls {
+ if u.Scheme == "http" {
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Warn("在钥匙和证书文件存在的情况下,方案为HTTP;忽略钥匙和证书文件", zap.String("peer-url", u.String()))
+ }
+ if cfg.PeerTLSInfo.ClientCertAuth {
+ cfg.logger.Warn("方案为HTTP;当启用 --peer-client-cert-auth;忽略钥匙和证书文件", zap.String("peer-url", u.String()))
+ }
+ }
+ // 构造peerListener对象 监听2380 作为服务端模式
+ peers[i] = &peerListener{close: func(context.Context) error { return nil }}
+ // 调用接口,创建listener对象,返回来之后,
+ // socket套接字已经完成listener监听流程
+ peers[i].Listener, err = transport.NewListenerWithOpts(u.Host, u.Scheme,
+ transport.WithTLSInfo(&cfg.PeerTLSInfo),
+ transport.WithSocketOpts(&cfg.SocketOpts),
+ transport.WithTimeout(rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout),
+ )
+ if err != nil {
+ return nil, err
+ }
+ //
+ peers[i].close = func(context.Context) error {
+ return peers[i].Listener.Close()
+ }
+ }
+ return peers, nil
+}
+
+// 在rafthttp.Transport启动后配置对等处理程序
+func (e *Etcd) servePeers() (err error) {
+ // 生成http.hander 用于处理peer请求
+ httpHandler := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)
+ var peerTLScfg *tls.Config
+ if !e.cfg.PeerTLSInfo.Empty() {
+ if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
+ return err
+ }
+ }
+
+ for _, p := range e.Peers {
+
+ u := p.Listener.Addr().String()
+ grpcServer := v3rpc.Server(e.Server, peerTLScfg, nil)
+ m := cmux.New(p.Listener)
+ go grpcServer.Serve(m.Match(cmux.HTTP2())) // 基于http2 tcp://127.0.0.1:2380
+
+ httpServer := &http.Server{
+ Handler: grpcHandlerFunc(grpcServer, httpHandler),
+ ReadTimeout: 5 * time.Minute,
+ ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
+ }
+ go httpServer.Serve(m.Match(cmux.Any())) // http1
+
+ p.serve = func() error {
+ // 回调函数,激活服务,主要是Accept方法
+ e.cfg.logger.Info("cmux::serve", zap.String("address", u))
+ return m.Serve()
+ }
+
+ p.close = func(ctx context.Context) error {
+ // 优雅关闭 http.Server、打开的listeners、空闲的connections 直到超时或上下文关闭
+ e.cfg.logger.Info("开始停止服务", zap.String("address", u))
+ stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: grpcServer, http: httpServer})
+ e.cfg.logger.Info("已停止服务", zap.String("address", u))
+ m.Close()
+ return nil
+ }
+ }
+
+ // start peer servers in a goroutine
+ for _, pl := range e.Peers {
+ go func(l *peerListener) {
+ u := l.Addr().String()
+ e.cfg.logger.Info(
+ "serving peer traffic",
+ zap.String("address", u),
+ )
+ e.errHandler(l.serve())
+ }(pl)
+ }
+ return nil
+}
+
+// 配置与etcdctl客户端的listener选项
+func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
+ // 更新密码套件
+ if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+ // LCURLS 自签证书
+ if err = cfg.ClientSelfCert(); err != nil {
+ cfg.logger.Fatal("未能获得客户自签名的证书", zap.Error(err))
+ }
+ if cfg.EnablePprof {
+ cfg.logger.Info("允许性能分析", zap.String("path", debugutil.HTTPPrefixPProf))
+ }
+
+ sctxs = make(map[string]*serveCtx)
+ for _, u := range cfg.LCUrls {
+ sctx := newServeCtx(cfg.logger)
+ if u.Scheme == "http" || u.Scheme == "unix" {
+ if !cfg.ClientTLSInfo.Empty() {
+ cfg.logger.Warn("在钥匙和证书文件存在的情况下,方案为HTTP;忽略钥匙和证书文件", zap.String("client-url", u.String()))
+ }
+ if cfg.ClientTLSInfo.ClientCertAuth {
+ cfg.logger.Warn("方案是HTTP,同时启用了-客户证书认证;该URL忽略了客户证书认证.", zap.String("client-url", u.String()))
+ }
+ }
+ if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
+ return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file)必须提供,当协议是%q", u.String())
+ }
+
+ network := "tcp"
+ addr := u.Host
+ if u.Scheme == "unix" || u.Scheme == "unixs" {
+ network = "unix"
+ addr = u.Host + u.Path
+ }
+ sctx.network = network
+
+ sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
+ sctx.insecure = !sctx.secure // 在处理etcdctl 请求上,是不是启用证书
+ if oldctx := sctxs[addr]; oldctx != nil {
+ oldctx.secure = oldctx.secure || sctx.secure
+ oldctx.insecure = oldctx.insecure || sctx.insecure
+ continue
+ }
+
+ if sctx.l, err = transport.NewListenerWithOpts(addr, u.Scheme,
+ transport.WithSocketOpts(&cfg.SocketOpts),
+ transport.WithSkipTLSInfoCheck(true),
+ ); err != nil {
+ return nil, err
+ }
+ // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
+ // hosts that disable ipv6. So, use the address given by the user.
+ sctx.addr = addr
+
+ if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
+ if fdLimit <= reservedInternalFDNum {
+ cfg.logger.Fatal(
+ "file descriptor limit of etcd process is too low; please set higher",
+ zap.Uint64("limit", fdLimit),
+ zap.Int("recommended-limit", reservedInternalFDNum),
+ )
+ }
+ sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
+ }
+
+ if network == "tcp" {
+ if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil {
+ return nil, err
+ }
+ }
+
+ defer func(u url.URL) {
+ if err == nil {
+ return
+ }
+ sctx.l.Close()
+ cfg.logger.Warn("关闭peer listener", zap.String("address", u.Host), zap.Error(err))
+ }(u)
+ for k := range cfg.UserHandlers {
+ sctx.userHandlers[k] = cfg.UserHandlers[k]
+ }
+ sctx.serviceRegister = cfg.ServiceRegister
+ if cfg.EnablePprof || cfg.LogLevel == "debug" {
+ sctx.registerPprof()
+ }
+ if cfg.LogLevel == "debug" {
+ sctx.registerTrace()
+ }
+ sctxs[addr] = sctx
+ }
+ return sctxs, nil
+}
+
+// OK
+func (e *Etcd) serveClients() (err error) {
+ if !e.cfg.ClientTLSInfo.Empty() {
+ e.cfg.logger.Info(
+ "使用证书启动client",
+ zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)),
+ zap.Strings("cipher-suites", e.cfg.CipherSuites),
+ )
+ }
+
+ // Start a client server goroutine for each listen address
+ var h http.Handler
+ if e.Config().EnableV2 {
+ if e.Config().V2DeprecationEffective().IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) {
+ return fmt.Errorf("--enable-v2 and --v2-deprecation=%s are mutually exclusive", e.Config().V2DeprecationEffective())
+ }
+ e.cfg.logger.Warn("Flag `enable-v2` is deprecated and will get removed in etcd 3.6.")
+ if len(e.Config().ExperimentalEnableV2V3) > 0 {
+ e.cfg.logger.Warn("Flag `experimental-enable-v2v3` is deprecated and will get removed in etcd 3.6.")
+ srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
+ h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout())
+ } else {
+ h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout())
+ }
+ } else {
+ mux := http.NewServeMux()
+ etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server)
+ etcdhttp.HandleMetricsHealthForV3(e.cfg.logger, mux, e.Server)
+ h = mux
+ }
+
+ mux := http.NewServeMux() // ✅
+ etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server) // ✅
+ h = mux
+
+ var gopts []grpc.ServerOption
+ if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: e.cfg.GRPCKeepAliveMinTime,
+ PermitWithoutStream: false, // 默认false
+ // 如果是true,即使没有活动流(RPCs),服务器也允许keepalive pings.如果是假的,客户端在没有活动流的情况下发送ping 流,服务器将发送GOAWAY并关闭连接.
+ }))
+ }
+ if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: e.cfg.GRPCKeepAliveInterval,
+ Timeout: e.cfg.GRPCKeepAliveTimeout,
+ }))
+ }
+
+ // 启动每一个监听网卡的程序
+ for _, sctx := range e.sctxs {
+ go func(s *serveCtx) {
+ e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
+ }(sctx)
+ }
+ return nil
+}
+
+func (e *Etcd) serveMetrics() (err error) {
+ if e.cfg.Metrics == "extensive" { // basic
+ grpc_prometheus.EnableHandlingTimeHistogram()
+ }
+ // 长度为0, 监听etcd ctl客户端请求
+ if len(e.cfg.ListenMetricsUrls) > 0 {
+ for _, murl := range e.cfg.ListenMetricsUrls {
+ tlsInfo := &e.cfg.ClientTLSInfo
+ if murl.Scheme == "http" {
+ tlsInfo = nil
+ }
+ ml, err := transport.NewListenerWithOpts(murl.Host, murl.Scheme,
+ transport.WithTLSInfo(tlsInfo),
+ transport.WithSocketOpts(&e.cfg.SocketOpts),
+ )
+ if err != nil {
+ return err
+ }
+ e.metricsListeners = append(e.metricsListeners, ml)
+ go func(u url.URL, ln net.Listener) {
+ e.cfg.logger.Info(
+ "serving metrics",
+ zap.String("address", u.String()),
+ )
+ }(murl, ml)
+ }
+ }
+ return nil
+}
+
+// 处理err
+func (e *Etcd) errHandler(err error) {
+ select {
+ case <-e.stopc:
+ return
+ default:
+ }
+ // 一般都卡在这
+ select {
+ case <-e.stopc:
+ case e.errc <- err:
+ }
+}
+
+// GetLogger returns the logger.
+func (e *Etcd) GetLogger() *zap.Logger {
+ e.cfg.loggerMu.RLock()
+ l := e.cfg.logger
+ e.cfg.loggerMu.RUnlock()
+ return l
+}
+
+// 解析返回条数、时间
+func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
+ h, err := strconv.Atoi(retention)
+ if err == nil && h >= 0 {
+ switch mode {
+ case CompactorModeRevision:
+ ret = time.Duration(int64(h))
+ case CompactorModePeriodic:
+ ret = time.Duration(int64(h)) * time.Hour
+ }
+ } else {
+ // 周期性压缩
+ ret, err = time.ParseDuration(retention)
+ if err != nil {
+ return 0, fmt.Errorf("解析失败CompactionRetention: %v", err)
+ }
+ }
+ return ret, nil
+}
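parseCompactionRetention is unexported, so it cannot be called from outside the package; purely as an illustration of its behaviour, the sketch below reproduces the mapping for a few inputs:

    package main

    import (
    	"fmt"
    	"strconv"
    	"time"
    )

    // retentionExample mirrors the mapping implemented by parseCompactionRetention above (sketch only).
    func retentionExample(mode, retention string) time.Duration {
    	if h, err := strconv.Atoi(retention); err == nil && h >= 0 {
    		if mode == "revision" {
    			return time.Duration(int64(h)) // a revision count carried in a time.Duration
    		}
    		return time.Duration(int64(h)) * time.Hour // periodic: whole hours
    	}
    	d, _ := time.ParseDuration(retention) // periodic: e.g. "72h", "30m"
    	return d
    }

    func main() {
    	fmt.Println(retentionExample("revision", "1000")) // 1µs (a Duration holding the count 1000)
    	fmt.Println(retentionExample("periodic", "5"))    // 5h0m0s
    	fmt.Println(retentionExample("periodic", "72h"))  // 72h0m0s
    }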
+
+func (e *Etcd) setupTracing(ctx context.Context) (exporter tracesdk.SpanExporter, options []otelgrpc.Option, err error) {
+ exporter, err = otlp.NewExporter(ctx,
+ otlpgrpc.NewDriver(
+ otlpgrpc.WithEndpoint(e.cfg.ExperimentalDistributedTracingAddress),
+ otlpgrpc.WithInsecure(),
+ ))
+ if err != nil {
+ return nil, nil, err
+ }
+ res := resource.NewWithAttributes(
+ semconv.ServiceNameKey.String(e.cfg.ExperimentalDistributedTracingServiceName),
+ )
+	// As the tracing service instance ID must be unique, it should
+	// never use the empty default string value, so we only set it
+	// if it's a non-empty string.
+ if e.cfg.ExperimentalDistributedTracingServiceInstanceID != "" {
+ resWithIDKey := resource.NewWithAttributes(
+ (semconv.ServiceInstanceIDKey.String(e.cfg.ExperimentalDistributedTracingServiceInstanceID)),
+ )
+ // Merge resources to combine into a new
+ // resource in case of duplicates.
+ res = resource.Merge(res, resWithIDKey)
+ }
+
+ options = append(options,
+ otelgrpc.WithPropagators(
+ propagation.NewCompositeTextMapPropagator(
+ propagation.TraceContext{},
+ propagation.Baggage{},
+ ),
+ ),
+ otelgrpc.WithTracerProvider(
+ tracesdk.NewTracerProvider(
+ tracesdk.WithBatcher(exporter),
+ tracesdk.WithResource(res),
+ ),
+ ),
+ )
+
+ e.cfg.logger.Info(
+ "distributed tracing enabled",
+ zap.String("distributed-tracing-address", e.cfg.ExperimentalDistributedTracingAddress),
+ zap.String("distributed-tracing-service-name", e.cfg.ExperimentalDistributedTracingServiceName),
+ zap.String("distributed-tracing-service-instance-id", e.cfg.ExperimentalDistributedTracingServiceInstanceID),
+ )
+
+ return exporter, options, err
+}
diff --git a/etcd/embed/inter.go b/etcd/embed/inter.go
new file mode 100644
index 00000000000..1a24e6d7b60
--- /dev/null
+++ b/etcd/embed/inter.go
@@ -0,0 +1,15 @@
+package embed
+
+import (
+ "net/http"
+
+ gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+)
+
+func mux() {
+ var _ etcdserver.EtcdServer
+ var _ gw.ServeMux
+ var _ http.ServeMux
+ var _ http.Handler // ServeHTTP方法
+}
diff --git a/etcd/embed/serve.go b/etcd/embed/serve.go
new file mode 100644
index 00000000000..f6d9744fd93
--- /dev/null
+++ b/etcd/embed/serve.go
@@ -0,0 +1,422 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ defaultLog "log"
+ "math"
+ "net"
+ "net/http"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/credentials"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3client"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb"
+ v3electiongw "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb/gw"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb"
+ v3lockgw "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb/gw"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc"
+ etcdservergw "github.com/ls-2018/etcd_cn/offical/etcdserverpb/gw"
+ "github.com/ls-2018/etcd_cn/pkg/debugutil"
+ "github.com/ls-2018/etcd_cn/pkg/httputil"
+
+ gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/soheilhy/cmux"
+ "github.com/tmc/grpc-websocket-proxy/wsproxy"
+ "go.uber.org/zap"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc"
+)
+
+// serveCtx serves client traffic (HTTP and gRPC) on a single listening port.
+type serveCtx struct {
+	lg       *zap.Logger
+	l        net.Listener // the single listener bound to the local client port (e.g. 2379)
+	addr     string
+	network  string // "tcp" or "unix"
+	secure   bool   // serve client (etcdctl) traffic over TLS
+	insecure bool   // serve client traffic without TLS; decided by the scheme of the listen-client URL, the opposite of secure
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	userHandlers    map[string]http.Handler
+	serviceRegister func(*grpc.Server) // extension hook for registering additional gRPC services
+	serversC        chan *servers
+}
+
+type servers struct {
+ secure bool
+ grpc *grpc.Server
+ http *http.Server
+}
+
+// OK
+func newServeCtx(lg *zap.Logger) *serveCtx {
+ ctx, cancel := context.WithCancel(context.Background())
+ if lg == nil {
+		lg = zap.NewNop() // a no-op logger that discards all output
+ }
+ return &serveCtx{
+ lg: lg,
+ ctx: ctx,
+ cancel: cancel,
+ userHandlers: make(map[string]http.Handler),
+		serversC:     make(chan *servers, 2), // capacity 2 in case both sctx.insecure and sctx.secure are true
+ }
+}
+
+// serve creates goroutines that accept and handle inbound client requests.
+func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlsinfo *transport.TLSInfo, handler http.Handler, errHandler func(error),
+ gopts ...grpc.ServerOption,
+) (err error) {
+ logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
+	<-s.ReadyNotify() // blocks until the server is ready; this channel is closed once ready
+
+	sctx.lg.Info("ready to serve client requests")
+	// create the connection multiplexer: multiple protocols are detected and served on one listener
+ m := cmux.New(sctx.l)
+	v3c := v3client.New(s) // in-process client that operates on the server directly
+ servElection := v3election.NewElectionServer(v3c)
+ servLock := v3lock.NewLockServer(v3c)
+
+ var gs *grpc.Server
+ defer func() {
+ if err != nil && gs != nil {
+ gs.Stop()
+ }
+ }()
+	// insecure (non-TLS) serving path
+ if sctx.insecure {
+		gs = v3rpc.Server(s, nil, nil, gopts...) // build the gRPC server and register services / connection options
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+		grpcListener := m.Match(cmux.HTTP2()) // HTTP/2 connections are routed to the gRPC server
+
+ go func() { errHandler(gs.Serve(grpcListener)) }()
+
+ var gwmux *gw.ServeMux
+		// the gRPC gateway (enabled by default) translates HTTP/JSON requests into gRPC
+ if s.Cfg.EnableGRPCGateway {
+ gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()}) // ✅
+ if err != nil {
+ return err
+ }
+ }
+		// combine the gateway mux and user handlers into a single HTTP mux
+ httpmux := sctx.createMux(gwmux, handler) // http->grpc
+
+ srvhttp := &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux), // ✅
+ ErrorLog: logger,
+ }
+ httpl := m.Match(cmux.HTTP1())
+ go func() { errHandler(srvhttp.Serve(httpl)) }()
+
+ sctx.serversC <- &servers{grpc: gs, http: srvhttp}
+		sctx.lg.Info("serving client traffic insecurely; this is strongly discouraged!", zap.String("address", sctx.l.Addr().String()))
+ }
+
+ if sctx.secure {
+ tlscfg, tlsErr := tlsinfo.ServerConfig()
+ if tlsErr != nil {
+ return tlsErr
+ }
+ gs = v3rpc.Server(s, tlscfg, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ handler = grpcHandlerFunc(gs, handler)
+
+ var gwmux *gw.ServeMux
+ if s.Cfg.EnableGRPCGateway {
+ dtls := tlscfg.Clone()
+ // trust local etcd
+ dtls.InsecureSkipVerify = true
+ bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
+ opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())}
+ gwmux, err = sctx.registerGateway(opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ var tlsl net.Listener
+ tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
+ if err != nil {
+ return err
+ }
+ // TODO: add debug flag; enable logging when debug flag is set
+ httpmux := sctx.createMux(gwmux, handler)
+
+ srv := &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ TLSConfig: tlscfg,
+ ErrorLog: logger, // do not log user error
+ }
+ go func() { errHandler(srv.Serve(tlsl)) }()
+
+ sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
+ sctx.lg.Info(
+ "serving client traffic securely",
+ zap.String("address", sctx.l.Addr().String()),
+ )
+ }
+
+ close(sctx.serversC)
+ return m.Serve()
+}
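+
+// Minimal sketch (illustrative only) of the cmux pattern used by serve above: a single
+// TCP listener is split by protocol so the gRPC server (HTTP/2) and the HTTP/1 gateway
+// can share one port.
+//
+//	m := cmux.New(ln)
+//	grpcL := m.Match(cmux.HTTP2()) // HTTP/2 preface -> gRPC server
+//	httpL := m.Match(cmux.HTTP1()) // HTTP/1.x       -> http.Server
+//	go gs.Serve(grpcL)
+//	go srvhttp.Serve(httpL)
+//	_ = m.Serve() // blocks, demultiplexing accepted connections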
+
+type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
+
+// registerGateway registers the gRPC gateway, which translates HTTP/JSON requests into gRPC calls (enabled by default).
+func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
+ ctx := sctx.ctx
+
+ addr := sctx.addr
+ // tcp unix
+ if network := sctx.network; network == "unix" {
+		// explicitly prefix the unix network so the gRPC dialer can reach the socket
+ addr = fmt.Sprintf("%s://%s", network, addr)
+ }
+
+ opts = append(opts, grpc.WithDefaultCallOptions([]grpc.CallOption{
+ grpc.MaxCallRecvMsgSize(math.MaxInt32),
+ }...))
+	// establish a gRPC connection to the local etcd server
+ conn, err := grpc.DialContext(ctx, addr, opts...)
+ if err != nil {
+ return nil, err
+ }
+ gwmux := gw.NewServeMux()
+
+ handlers := []registerHandlerFunc{
+		etcdservergw.RegisterKVHandler, // each handler maps REST routes onto the corresponding gRPC service
+ etcdservergw.RegisterWatchHandler,
+ etcdservergw.RegisterLeaseHandler,
+ etcdservergw.RegisterClusterHandler,
+ etcdservergw.RegisterMaintenanceHandler,
+ etcdservergw.RegisterAuthHandler,
+ v3lockgw.RegisterLockHandler,
+ v3electiongw.RegisterElectionHandler,
+ }
+ for _, h := range handlers {
+ if err := h(ctx, gwmux, conn); err != nil {
+ return nil, err
+ }
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+			sctx.lg.Warn("failed to close connection", zap.String("address", sctx.l.Addr().String()), zap.Error(cerr))
+ }
+ }()
+
+ return gwmux, nil
+}
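+
+// Illustrative example (not from the original file): once the gateway handlers are
+// registered, plain HTTP clients can reach the v3 API, e.g.
+//
+//	curl -L http://127.0.0.1:2379/v3/kv/range -X POST -d '{"key": "Zm9v"}'
+//
+// The gateway decodes the JSON body (keys and values are base64-encoded; "Zm9v" is
+// "foo"), forwards the call over the local gRPC connection dialed above, and encodes
+// the gRPC response back into JSON.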
+
+// createMux builds the HTTP mux that fronts the user handlers, the gRPC gateway, and the fallback handler.
+func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
+	httpmux := http.NewServeMux() // request router
+ for path, h := range sctx.userHandlers {
+ httpmux.Handle(path, h)
+ }
+
+ if gwmux != nil {
+ httpmux.Handle(
+ "/v3/",
+ wsproxy.WebsocketProxy(
+ gwmux,
+ wsproxy.WithRequestMutator(
+				// websocket-proxied requests default to the POST method used for streams
+ func(_ *http.Request, outgoing *http.Request) *http.Request {
+ outgoing.Method = "POST"
+ return outgoing
+ },
+ ),
+ wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
+ ),
+ )
+ }
+ if handler != nil {
+ httpmux.Handle("/", handler)
+ }
+ return httpmux
+}
+
+// createAccessController wraps the HTTP multiplexer:
+// - rewrites gRPC-gateway request paths
+// - checks the hostname whitelist
+// Client HTTP requests pass through here first.
+func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &accessController{lg: lg, s: s, mux: mux}
+}
+
+type accessController struct {
+ lg *zap.Logger
+ s *etcdserver.EtcdServer //
+ mux *http.ServeMux
+}
+
+func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if req == nil {
+		http.Error(rw, "Request is nil", http.StatusBadRequest)
+ return
+ }
+	// redirect for backward compatibility
+ if req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") {
+ req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1)
+ }
+
+	if req.TLS == nil { // check the origin when the client connection is insecure
+		host := httputil.GetHostname(req) // requested hostname, domain name, or IP
+		if !ac.s.AccessController.IsHostWhitelisted(host) {
+			ac.lg.Warn("rejecting HTTP request to prevent DNS rebinding attacks", zap.String("host", host))
+ http.Error(rw, errCVE20185702(host), http.StatusMisdirectedRequest)
+ return
+ }
+ } else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway &&
+ ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") {
+		// TODO: revisit
+ for _, chains := range req.TLS.VerifiedChains {
+ if len(chains) < 1 {
+ continue
+ }
+ if len(chains[0].Subject.CommonName) != 0 {
+				http.Error(rw, "CommonName of client sending a request against gateway will be ignored and not used as expected", http.StatusBadRequest)
+ return
+ }
+ }
+ }
+
+	// write CORS headers depending on whether cross-origin requests are allowed
+ if ac.s.AccessController.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == "OPTIONS" {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ac.mux.ServeHTTP(rw, req)
+}
+
+// addCORSHeader adds the correct CORS headers for the given origin.
+func addCORSHeader(w http.ResponseWriter, origin string) {
+ w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+ w.Header().Add("Access-Control-Allow-Origin", origin)
+ w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
+}
+
+// https://github.com/transmission/transmission/pull/468
+func errCVE20185702(host string) string {
+ return fmt.Sprintf(`
+etcd received your request, but the Host header was unrecognized.
+
+To fix this, choose one of the following options:
+- Enable TLS, then any HTTPS request will be allowed.
+- Add the hostname you want to use to the whitelist in settings.
+ - e.g. etcd --host-whitelist %q
+
+This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702).
+`, host)
+}
+
+// WrapCORS wraps existing handler with CORS.
+// TODO: deprecate this after v2 proxy deprecate
+func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler {
+ return &corsHandler{
+ ac: &etcdserver.AccessController{CORS: cors},
+ h: h,
+ }
+}
+
+type corsHandler struct {
+ ac *etcdserver.AccessController
+ h http.Handler
+}
+
+func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if ch.ac.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == "OPTIONS" {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ch.h.ServeHTTP(rw, req)
+}
+
+func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
+ if sctx.userHandlers[s] != nil {
+		sctx.lg.Warn("path is already registered by user handler", zap.String("path", s))
+ return
+ }
+ sctx.userHandlers[s] = h
+}
+
+func (sctx *serveCtx) registerPprof() {
+ for p, h := range debugutil.PProfHandlers() {
+ sctx.registerUserHandler(p, h)
+ }
+}
+
+func (sctx *serveCtx) registerTrace() {
+ reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
+ sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
+ evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
+ sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
+}
+
+// ---------------------------------------- OVER --------------------------------------------------------------
+
+// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC connections and to otherHandler otherwise. Adapted from the gRPC documentation.
+func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
+ if otherHandler == nil {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ grpcServer.ServeHTTP(w, r)
+ })
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
+ grpcServer.ServeHTTP(w, r)
+ } else {
+ otherHandler.ServeHTTP(w, r)
+ }
+ })
+}
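+
+// Hedged note (not from the original file): this content-type based split relies on Go's
+// http.Server speaking HTTP/2, which it only does over TLS unless an h2c wrapper is used;
+// that is why the secure path above installs it on a TLS listener. Rough shape:
+//
+//	srv := &http.Server{
+//		Handler:   grpcHandlerFunc(gs, httpmux),
+//		TLSConfig: tlscfg,
+//	}
+//	// ProtoMajor == 2 && Content-Type "application/grpc" -> gs.ServeHTTP
+//	// everything else                                    -> httpmux.ServeHTTP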
diff --git a/etcd/embed/util.go b/etcd/embed/util.go
new file mode 100644
index 00000000000..7c61b92ab3a
--- /dev/null
+++ b/etcd/embed/util.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "path/filepath"
+
+ "github.com/ls-2018/etcd_cn/etcd/wal"
+)
+
+// isMemberInitialized reports whether the member's WAL directory already exists.
+func isMemberInitialized(cfg *Config) bool {
+ waldir := cfg.WalDir
+ if waldir == "" {
+ waldir = filepath.Join(cfg.Dir, "member", "wal")
+ }
+ return wal.Exist(waldir)
+}
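+
+// Illustrative sketch (an assumption, spelled out for the default layout): with
+// --data-dir=default.etcd and no --wal-dir, the check above is equivalent to
+//
+//	waldir := filepath.Join("default.etcd", "member", "wal")
+//	initialized := wal.Exist(waldir) // true only after the member has bootstrapped once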
diff --git a/etcd/etcdmain/config.go b/etcd/etcdmain/config.go
new file mode 100644
index 00000000000..70a49fd00c8
--- /dev/null
+++ b/etcd/etcdmain/config.go
@@ -0,0 +1,440 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Every change should be reflected on help.go as well.
+
+package etcdmain
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "runtime"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
+ cconfig "github.com/ls-2018/etcd_cn/etcd/config"
+ "github.com/ls-2018/etcd_cn/etcd/embed"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "github.com/ls-2018/etcd_cn/pkg/flags"
+
+ "go.uber.org/zap"
+ "sigs.k8s.io/yaml"
+)
+
+var (
+ proxyFlagOff = "off"
+ proxyFlagReadonly = "readonly"
+ proxyFlagOn = "on"
+
+ fallbackFlagExit = "exit"
+ fallbackFlagProxy = "proxy"
+
+ ignored = []string{
+ "cluster-active-size",
+ "cluster-remove-delay",
+ "cluster-sync-interval",
+ "config",
+ "force",
+ "max-result-buffer",
+ "max-retry-attempts",
+ "peer-heartbeat-interval",
+ "peer-election-timeout",
+ "retry-interval",
+ "snapshot",
+ "v",
+ "vv",
+ // for coverage testing
+ "test.coverprofile",
+ "test.outputdir",
+ }
+)
+
+type configProxy struct {
+	ProxyFailureWaitMs     uint `json:"proxy-failure-wait"` // time in milliseconds an endpoint stays in the failed state before the proxy reconsiders it
+ ProxyRefreshIntervalMs uint `json:"proxy-refresh-interval"`
+ ProxyDialTimeoutMs uint `json:"proxy-dial-timeout"`
+ ProxyWriteTimeoutMs uint `json:"proxy-write-timeout"`
+ ProxyReadTimeoutMs uint `json:"proxy-read-timeout"`
+ Fallback string
+ Proxy string
+ ProxyJSON string `json:"proxy"`
+ FallbackJSON string `json:"discovery-fallback"`
+}
+
+// configFlags holds the flag set used to parse configuration from the command line.
+type configFlags struct {
+ flagSet *flag.FlagSet
+	clusterState *flags.SelectiveStringValue // "new" during initial static or DNS bootstrapping of all members; "existing" makes etcd try to join an existing cluster
+ fallback *flags.SelectiveStringValue
+ proxy *flags.SelectiveStringValue
+ v2deprecation *flags.SelectiveStringsValue
+}
+
+// config holds the configuration for an etcd command-line invocation.
+type config struct {
+	ec           embed.Config
+	cp           configProxy // proxy configuration
+	cf           configFlags // flag set used for command-line parsing
+	configFile   string      // load server configuration from a file
+	printVersion bool        // print the version and exit
+	ignored      []string
+}
+
+// OK
+func newConfig() *config {
+ cfg := &config{
+ ec: *embed.NewConfig(),
+ cp: configProxy{
+ Proxy: proxyFlagOff, // off
+ ProxyFailureWaitMs: 5000,
+ ProxyRefreshIntervalMs: 30000,
+ ProxyDialTimeoutMs: 1000,
+ ProxyWriteTimeoutMs: 5000,
+ },
+ ignored: ignored,
+ }
+ cfg.cf = configFlags{
+ flagSet: flag.NewFlagSet("etcd", flag.ContinueOnError),
+ clusterState: flags.NewSelectiveStringValue(
+ embed.ClusterStateFlagNew,
+ embed.ClusterStateFlagExisting,
+ ),
+ fallback: flags.NewSelectiveStringValue(
+ fallbackFlagProxy,
+ fallbackFlagExit,
+ ),
+ proxy: flags.NewSelectiveStringValue(
+ proxyFlagOff, // off
+ proxyFlagReadonly, // readonly
+ proxyFlagOn, // on
+ ),
+ v2deprecation: flags.NewSelectiveStringsValue(
+ string(cconfig.V2_DEPR_0_NOT_YET),
+ string(cconfig.V2_DEPR_1_WRITE_ONLY),
+ string(cconfig.V2_DEPR_1_WRITE_ONLY_DROP),
+ string(cconfig.V2_DEPR_2_GONE)),
+ }
+
+ fs := cfg.cf.flagSet
+ fs.Usage = func() {
+ fmt.Fprintln(os.Stderr, usageline)
+ }
+
+ fs.StringVar(&cfg.configFile, "config-file", "", "从文件加载服务器配置.")
+
+ // member
+ fs.StringVar(&cfg.ec.Dir, "data-dir", cfg.ec.Dir, "服务运行数据保存的路径. ${name}.etcd")
+ fs.StringVar(&cfg.ec.WalDir, "wal-dir", cfg.ec.WalDir, "专用wal目录的路径.默认值:--data-dir的路径下")
+ fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultListenPeerURLs, ""), "listen-peer-urls", "和成员之间通信的地址.用于监听其他etcd member的url")
+ fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultListenClientURLs, ""), "listen-client-urls", "对外提供服务的地址")
+ fs.Var(flags.NewUniqueURLsWithExceptions("", ""), "listen-metrics-urls", "要监听指标和运行状况端点的url列表.")
+ fs.UintVar(&cfg.ec.MaxSnapFiles, "max-snapshots", cfg.ec.MaxSnapFiles, "要保留的最大快照文件数(0表示不受限制).5")
+ fs.UintVar(&cfg.ec.MaxWalFiles, "max-wals", cfg.ec.MaxWalFiles, "要保留的最大wal文件数(0表示不受限制). 5")
+ fs.StringVar(&cfg.ec.Name, "name", cfg.ec.Name, "本节点.人类可读的名字")
+	// This value is this node's entry in the --initial-cluster flag (e.g. default=http://localhost:2380). With static bootstrapping it must match the key used in that flag; with discovery, every member must have a unique name. Hostname or machine-id is recommended.
+	fs.Uint64Var(&cfg.ec.SnapshotCount, "snapshot-count", cfg.ec.SnapshotCount, "触发一次磁盘快照的提交事务的次数.")
+ fs.UintVar(&cfg.ec.TickMs, "heartbeat-interval", cfg.ec.TickMs, "心跳间隔 100ms")
+ fs.UintVar(&cfg.ec.ElectionMs, "election-timeout", cfg.ec.ElectionMs, "选举超时")
+ fs.BoolVar(&cfg.ec.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.ec.InitialElectionTickAdvance, "是否提前初始化选举时钟启动,以便更快的选举.")
+ fs.Int64Var(&cfg.ec.QuotaBackendBytes, "quota-backend-bytes", cfg.ec.QuotaBackendBytes, "当后端大小超过给定配额时(0默认为低空间配额).引发警报.")
+ fs.StringVar(&cfg.ec.BackendFreelistType, "backend-bbolt-freelist-type", cfg.ec.BackendFreelistType, "BackendFreelistType指定boltdb后端使用的freelist的类型(array and map是支持的类型). map ")
+ fs.DurationVar(&cfg.ec.BoltBackendBatchInterval, "backend-batch-interval", cfg.ec.BoltBackendBatchInterval, "BackendBatchInterval是提交后端事务前的最长时间.")
+ fs.IntVar(&cfg.ec.BoltBackendBatchLimit, "backend-batch-limit", cfg.ec.BoltBackendBatchLimit, "BackendBatchLimit是提交后端事务前的最大操作数.")
+ fs.UintVar(&cfg.ec.MaxTxnOps, "max-txn-ops", cfg.ec.MaxTxnOps, "事务中允许的最大操作数.")
+ fs.UintVar(&cfg.ec.MaxRequestBytes, "max-request-bytes", cfg.ec.MaxRequestBytes, "服务器将接受的最大客户端请求大小(字节).")
+ fs.DurationVar(&cfg.ec.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.ec.GRPCKeepAliveMinTime, "客户端在ping服务器之前应等待的最短持续时间间隔.")
+ fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "服务器到客户端ping的频率持续时间.以检查连接是否处于活动状态(0表示禁用).")
+ fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "关闭非响应连接之前的额外持续等待时间(0表示禁用).20s")
+ fs.BoolVar(&cfg.ec.SocketOpts.ReusePort, "socket-reuse-port", cfg.ec.SocketOpts.ReusePort, "启用在listener上设置套接字选项SO_REUSEPORT.允许重新绑定一个已经在使用的端口.false")
+ fs.BoolVar(&cfg.ec.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.ec.SocketOpts.ReuseAddress, "启用在listener上设置套接字选项SO_REUSEADDR 允许重新绑定一个已经在使用的端口 在`TIME_WAIT` 状态.")
+
+	// raft connection timeouts
+ fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "在每个rafthttp连接上设置的读取超时 5s")
+ fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "在每个rafthttp连接上设置写入超时 5s")
+
+	// clustering
+ fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultInitialAdvertisePeerURLs, ""), "initial-advertise-peer-urls", "集群成员的 URL地址.且会通告群集的其余成员节点.")
+ fs.Var(flags.NewUniqueURLsWithExceptions(embed.DefaultAdvertiseClientURLs, ""), "advertise-client-urls", "就是客户端(etcdctl/curl等)跟etcd服务进行交互时请求的url")
+	// Note: do not advertise http://localhost:2379; that tells other nodes they can reach this member via localhost, so their clients would dial their own localhost and fail. Worse, if a proxy layer sits in front of etcd, the proxy would keep calling its own endpoint through localhost, looping forever.
+ fs.StringVar(&cfg.ec.Durl, "discovery", cfg.ec.Durl, "用于引导群集的发现URL.")
+ fs.Var(cfg.cf.fallback, "discovery-fallback", fmt.Sprintf(`发现服务失败时的预期行为("退出"或"代理")."proxy"仅支持v2 API. %q`, cfg.cf.fallback.Valids()))
+
+ fs.StringVar(&cfg.ec.Dproxy, "discovery-proxy", cfg.ec.Dproxy, "用于流量到发现服务的HTTP代理.")
+ fs.StringVar(&cfg.ec.DNSCluster, "discovery-srv", cfg.ec.DNSCluster, "DNS srv域用于引导群集.")
+ fs.StringVar(&cfg.ec.DNSClusterServiceName, "discovery-srv-name", cfg.ec.DNSClusterServiceName, "使用DNS引导时查询的DNS srv名称的后缀.")
+ fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "用于引导初始集群配置,集群中所有节点的信息..")
+ fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "创建集群的 token.这个值每个集群保持唯一.")
+ fs.Var(cfg.cf.clusterState, "initial-cluster-state", "初始集群状态 ('new' or 'existing').")
+
+ fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "拒绝可能导致仲裁丢失的重新配置请求.true")
+
+ fs.BoolVar(&cfg.ec.PreVote, "pre-vote", cfg.ec.PreVote, "是否启用PreVote扩展,解决分区恢复选举bug")
+
+	fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state. Deprecated in 3.5. Will be decommissioned in 3.6.")
+	fs.Var(cfg.cf.v2deprecation, "v2-deprecation", fmt.Sprintf("v2store deprecation stage: %q. ", cfg.cf.v2deprecation.Valids()))
+
+ // proxy
+ fs.Var(cfg.cf.proxy, "proxy", fmt.Sprintf("代理模式设置 %q", cfg.cf.proxy.Valids()))
+ fs.UintVar(&cfg.cp.ProxyFailureWaitMs, "proxy-failure-wait", cfg.cp.ProxyFailureWaitMs, "在重新考虑代理请求之前.endpoints 将处于失败状态的时间(以毫秒为单位).")
+ fs.UintVar(&cfg.cp.ProxyRefreshIntervalMs, "proxy-refresh-interval", cfg.cp.ProxyRefreshIntervalMs, "endpoints 刷新间隔的时间(以毫秒为单位).")
+ fs.UintVar(&cfg.cp.ProxyDialTimeoutMs, "proxy-dial-timeout", cfg.cp.ProxyDialTimeoutMs, "拨号超时的时间(以毫秒为单位)或0表示禁用超时")
+ fs.UintVar(&cfg.cp.ProxyWriteTimeoutMs, "proxy-write-timeout", cfg.cp.ProxyWriteTimeoutMs, "写入超时的时间(以毫秒为单位)或0以禁用超时.")
+ fs.UintVar(&cfg.cp.ProxyReadTimeoutMs, "proxy-read-timeout", cfg.cp.ProxyReadTimeoutMs, "读取超时的时间(以毫秒为单位)或0以禁用超时.")
+
+	// TLS configuration for client (etcdctl) communication
+ fs.StringVar(&cfg.ec.ClientTLSInfo.CertFile, "cert-file", "", "客户端证书")
+ fs.StringVar(&cfg.ec.ClientTLSInfo.KeyFile, "key-file", "", "客户端私钥")
+
+ fs.StringVar(&cfg.ec.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "验证client客户端时使用的 证书文件路径,否则在需要客户认证时将使用cert-file文件")
+ fs.StringVar(&cfg.ec.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "验证client客户端时使用的 密钥文件路径,否则在需要客户认证时将使用key-file文件.")
+ fs.BoolVar(&cfg.ec.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "启用客户端证书验证;默认false")
+ fs.StringVar(&cfg.ec.ClientTLSInfo.CRLFile, "client-crl-file", "", "客户端证书吊销列表文件的路径.")
+ fs.StringVar(&cfg.ec.ClientTLSInfo.AllowedHostname, "client-cert-allowed-hostname", "", "允许客户端证书认证使用TLS主机名.")
+ fs.StringVar(&cfg.ec.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "客户端etcd通信 的可信CA证书文件")
+ fs.BoolVar(&cfg.ec.ClientAutoTLS, "auto-tls", false, "客户端TLS使用自动生成的证书")
+	// TLS configuration for peer (etcd-to-etcd) communication
+ fs.StringVar(&cfg.ec.PeerTLSInfo.CertFile, "peer-cert-file", "", "证书路径")
+ fs.StringVar(&cfg.ec.PeerTLSInfo.KeyFile, "peer-key-file", "", "私钥路径")
+
+ fs.StringVar(&cfg.ec.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "验证server客户端时使用的 证书文件路径,否则在需要客户认证时将使用cert-file文件")
+ fs.StringVar(&cfg.ec.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "验证server客户端时使用的 密钥文件路径,否则在需要客户认证时将使用key-file文件.")
+
+ fs.BoolVar(&cfg.ec.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "启用server客户端证书验证;默认false")
+ fs.StringVar(&cfg.ec.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "服务器端ca证书")
+ fs.BoolVar(&cfg.ec.PeerAutoTLS, "peer-auto-tls", false, "节点之间使用生成的证书通信;默认false")
+ fs.UintVar(&cfg.ec.SelfSignedCertValidity, "self-signed-cert-validity", 1, "客户端证书和同级证书的有效期,单位为年 ;etcd自动生成的 如果指定了ClientAutoTLS and PeerAutoTLS,")
+ fs.StringVar(&cfg.ec.PeerTLSInfo.CRLFile, "peer-crl-file", "", "服务端证书吊销列表文件的路径.")
+ fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedCN, "peer-cert-allowed-cn", "", "允许的server客户端证书CommonName")
+ fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedHostname, "peer-cert-allowed-hostname", "", "允许的server客户端证书hostname")
+ fs.Var(flags.NewStringsValue(""), "cipher-suites", "客户端/etcds之间支持的TLS加密套件的逗号分隔列表(空将由Go自动填充).")
+ fs.BoolVar(&cfg.ec.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "跳过server 客户端证书中SAN字段的验证.默认false")
+
+ fs.Var(flags.NewUniqueURLsWithExceptions("*", "*"), "cors", "逗号分隔的CORS白名单.或跨来源资源共享.(空或*表示允许所有)")
+ fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "如果etcd是不安全的(空意味着允许所有).用逗号分隔HTTP客户端请求中的可接受主机名.")
+
+ // 日志
+ fs.StringVar(&cfg.ec.Logger, "logger", "zap", "当前只支持zap,结构化数据")
+ fs.Var(flags.NewUniqueStringsValue(embed.DefaultLogOutput), "log-outputs", "指定'stdout'或'stderr'以跳过日志记录,即使在systemd或逗号分隔的输出目标列表下运行也是如此.")
+ fs.StringVar(&cfg.ec.LogLevel, "log-level", logutil.DefaultLogLevel, "日志等级,只支持 debug, info, warn, error, panic, or fatal. Default 'info'.")
+ fs.BoolVar(&cfg.ec.EnableLogRotation, "enable-log-rotation", false, "启用单个日志输出文件目标的日志旋转.")
+ fs.StringVar(&cfg.ec.LogRotationConfigJSON, "log-rotation-config-json", embed.DefaultLogRotationConfig, "是用于日志轮换的默认配置. 默认情况下,日志轮换是禁用的.")
+
+ // 版本
+ fs.BoolVar(&cfg.printVersion, "version", false, "打印版本并退出.")
+	// --auto-compaction-mode=revision --auto-compaction-retention=1000  compacts every 5 minutes to "latest revision" - 1000;
+	// --auto-compaction-mode=periodic --auto-compaction-retention=12h   compacts every hour, retaining a 12-hour window.
+ fs.StringVar(&cfg.ec.AutoCompactionRetention, "auto-compaction-retention", "0", "在一个小时内为mvcc键值存储的自动压缩.0表示禁用自动压缩.")
+ fs.StringVar(&cfg.ec.AutoCompactionMode, "auto-compaction-mode", "periodic", "基于时间保留的三种模式:periodic, revision")
+
+ // 性能分析器 通过 HTTP
+ fs.BoolVar(&cfg.ec.EnablePprof, "enable-pprof", false, `通过HTTP服务器启用运行时分析数据.地址位于客户端URL +/debug/pprof/`)
+
+ // additional metrics
+ fs.StringVar(&cfg.ec.Metrics, "metrics", cfg.ec.Metrics, `设置导出的指标的详细程度,指定"扩展"以包括直方图指标(extensive,basic)`)
+
+ // experimental distributed tracing
+ fs.BoolVar(&cfg.ec.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing.")
+ fs.StringVar(&cfg.ec.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", embed.ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag).")
+ fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", embed.ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd.")
+	fs.StringVar(&cfg.ec.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance.")
+
+ // auth
+ fs.StringVar(&cfg.ec.AuthToken, "auth-token", cfg.ec.AuthToken, "指定验证令牌的具体选项. ('simple' or 'jwt')")
+ fs.UintVar(&cfg.ec.BcryptCost, "bcrypt-cost", cfg.ec.BcryptCost, "为散列身份验证密码指定bcrypt算法的成本/强度.有效值介于4和31之间.")
+ fs.UintVar(&cfg.ec.AuthTokenTTL, "auth-token-ttl", cfg.ec.AuthTokenTTL, "token过期时间")
+
+ // gateway
+ fs.BoolVar(&cfg.ec.EnableGRPCGateway, "enable-grpc-gateway", cfg.ec.EnableGRPCGateway, "Enable GRPC gateway.")
+
+ // experimental
+ fs.BoolVar(&cfg.ec.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ec.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.")
+ fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.")
+
+ fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", true, "允许leader定期向其他成员发送检查点,以防止leader变化时剩余TTL重置")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", true, "启用持续的剩余TTL,以防止长期租赁的无限期自动续约.在v3.6中始终启用.应使用该功能以确保从启用该功能的v3.5集群顺利升级.需要启用experimental-enable-lease-checkpoint.")
+ fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
+ fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
+ fs.DurationVar(&cfg.ec.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ec.ExperimentalDowngradeCheckTime, "两次降级状态检查之间的时间间隔.")
+ fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "时间长度.如果应用请求的时间超过这个值.就会产生一个警告.")
+ fs.BoolVar(&cfg.ec.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ec.ExperimentalMemoryMlock, "启用强制执行etcd页面(特别是bbolt)留在RAM中.")
+ fs.BoolVar(&cfg.ec.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "启用写事务在其只读检查操作中使用共享缓冲区.")
+ fs.UintVar(&cfg.ec.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd etcd bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect.")
+
+ // 非安全
+ fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "禁用fsync,不安全,会导致数据丢失.")
+ fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "强制创建新的单成员群集.它提交配置更改,强制删除集群中的所有现有成员并添加自身.需要将其设置为还原备份.")
+
+ // ignored
+ for _, f := range cfg.ignored {
+ fs.Var(&flags.IgnoredFlag{Name: f}, f, "")
+ }
+ return cfg
+}
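+
+// Illustrative invocation (hedged example; names and addresses are placeholders, not
+// defaults from this file). The flags registered above are typically supplied like:
+//
+//	etcd --name infra1 \
+//	  --data-dir /var/lib/etcd \
+//	  --listen-peer-urls http://10.0.0.1:2380 \
+//	  --initial-advertise-peer-urls http://10.0.0.1:2380 \
+//	  --listen-client-urls http://10.0.0.1:2379,http://127.0.0.1:2379 \
+//	  --advertise-client-urls http://10.0.0.1:2379 \
+//	  --initial-cluster infra1=http://10.0.0.1:2380 \
+//	  --initial-cluster-state new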
+
+// OK
+func (cfg *config) parse(arguments []string) error {
+ perr := cfg.cf.flagSet.Parse(arguments)
+ switch perr {
+ case nil:
+ case flag.ErrHelp:
+ fmt.Println(flagsline)
+ os.Exit(0)
+ default:
+ os.Exit(2)
+ }
+ if len(cfg.cf.flagSet.Args()) != 0 {
+		return fmt.Errorf("%q is not a valid flag", cfg.cf.flagSet.Arg(0))
+ }
+
+ if cfg.printVersion {
+ fmt.Printf("etcd Version: %s\n", version.Version)
+ fmt.Printf("Git SHA: %s\n", version.GitSHA)
+ fmt.Printf("Go Version: %s\n", runtime.Version())
+ fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
+ os.Exit(0)
+ }
+
+ var err error
+
+	// This env variable must be parsed separately: whether it is used or ignored depends on whether a config file was provided.
+ if cfg.configFile == "" {
+ cfg.configFile = os.Getenv(flags.FlagToEnv("ETCD", "config-file")) // ETCD_CONFIG_FILE
+ }
+
+ if cfg.configFile != "" {
+ err = cfg.configFromFile(cfg.configFile)
+ if lg := cfg.ec.GetLogger(); lg != nil {
+			lg.Info("loaded server configuration; other command-line flags and environment variables, if provided, will be ignored", zap.String("path", cfg.configFile))
+ }
+ } else {
+ err = cfg.configFromCmdLine()
+ }
+ if runtime.GOOS == "windows" {
+ fmt.Println(os.RemoveAll(fmt.Sprintf("E:\\etcd_cn\\%s.etcd", cfg.ec.Name)))
+ } else {
+ fmt.Println(os.RemoveAll(fmt.Sprintf("/Users/liushuo/Desktop/source_code/etcd_cn/%s.etcd", cfg.ec.Name)))
+ }
+ return err
+}
+
+// OK
+func (cfg *config) configFromCmdLine() error {
+	// the user-specified logger is not yet set up, so use this temporary logger during flag parsing
+ lg, err := zap.NewProduction()
+ if err != nil {
+ return err
+ }
+	err = flags.SetFlagsFromEnv(lg, "ETCD", cfg.cf.flagSet) // for every registered flag not yet set, try to set its value from the environment
+ if err != nil {
+ return err
+ }
+
+ if rafthttp.ConnReadTimeout < rafthttp.DefaultConnReadTimeout {
+ rafthttp.ConnReadTimeout = rafthttp.DefaultConnReadTimeout
+		lg.Info(fmt.Sprintf("raft-read-timeout increased to minimum value: %v", rafthttp.DefaultConnReadTimeout))
+ }
+ if rafthttp.ConnWriteTimeout < rafthttp.DefaultConnWriteTimeout {
+ rafthttp.ConnWriteTimeout = rafthttp.DefaultConnWriteTimeout
+ lg.Info(fmt.Sprintf("raft-write-timeout increased to minimum value: %v", rafthttp.DefaultConnWriteTimeout))
+ }
+	// URLs to listen on for peer traffic; if the IP is 0.0.0.0, etcd listens on the given port on every interface
+ cfg.ec.LPUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
+ cfg.ec.APUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
+ cfg.ec.LCUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
+ cfg.ec.ACUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
+ cfg.ec.ListenMetricsUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-metrics-urls")
+
+ cfg.ec.CORS = flags.UniqueURLsMapFromFlag(cfg.cf.flagSet, "cors")
+ cfg.ec.HostWhitelist = flags.UniqueStringsMapFromFlag(cfg.cf.flagSet, "host-whitelist")
+
+ cfg.ec.CipherSuites = flags.StringsFromFlag(cfg.cf.flagSet, "cipher-suites")
+
+ cfg.ec.LogOutputs = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-outputs")
+
+ cfg.ec.ClusterState = cfg.cf.clusterState.String()
+ cfg.cp.Fallback = cfg.cf.fallback.String() // proxy
+ cfg.cp.Proxy = cfg.cf.proxy.String() // off
+
+	// if listen-client-urls is set, disable the default advertise-client-urls
+ fmt.Println(`flags.IsSet(cfg.cf.flagSet, "listen-client-urls")`, flags.IsSet(cfg.cf.flagSet, "listen-client-urls"))
+ fmt.Println(`flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")`, flags.IsSet(cfg.cf.flagSet, "advertise-client-urls"))
+ missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")
+	// TODO: not fully understood yet
+ if !cfg.mayBeProxy() && missingAC {
+ cfg.ec.ACUrls = nil
+ }
+
+	// if discovery is set, disable the default initial cluster
+ if (cfg.ec.Durl != "" || cfg.ec.DNSCluster != "" || cfg.ec.DNSClusterServiceName != "") && !flags.IsSet(cfg.cf.flagSet, "initial-cluster") {
+ cfg.ec.InitialCluster = ""
+ }
+
+ return cfg.validate() // √
+}
+
+// OK
+func (cfg *config) configFromFile(path string) error {
+ eCfg, err := embed.ConfigFromFile(path)
+ if err != nil {
+ return err
+ }
+ cfg.ec = *eCfg
+
+	// load the additional (proxy) configuration from the same file
+ b, rerr := ioutil.ReadFile(path)
+ if rerr != nil {
+ return rerr
+ }
+ if yerr := yaml.Unmarshal(b, &cfg.cp); yerr != nil {
+ return yerr
+ }
+
+ if cfg.cp.FallbackJSON != "" {
+ if err := cfg.cf.fallback.Set(cfg.cp.FallbackJSON); err != nil {
+			log.Fatalf("unexpected error setting up discovery-fallback flag: %v", err)
+ }
+ cfg.cp.Fallback = cfg.cf.fallback.String()
+ }
+
+ if cfg.cp.ProxyJSON != "" {
+ if err := cfg.cf.proxy.Set(cfg.cp.ProxyJSON); err != nil {
+			log.Fatalf("unexpected error setting up proxy flag: %v", err)
+ }
+ cfg.cp.Proxy = cfg.cf.proxy.String()
+ }
+ return nil
+}
+
+func (cfg *config) mayBeProxy() bool {
+ mayFallbackToProxy := cfg.ec.Durl != "" && cfg.cp.Fallback == fallbackFlagProxy
+ return cfg.cp.Proxy != proxyFlagOff || mayFallbackToProxy
+}
+
+func (cfg *config) validate() error {
+ err := cfg.ec.Validate()
+	// TODO(yichengq): check this for the case of joining through the discovery service.
+ if err == embed.ErrUnsetAdvertiseClientURLsFlag && cfg.mayBeProxy() {
+ return nil
+ }
+ return err
+}
+
+// isProxy reports whether proxy mode is enabled.
+func (cfg config) isProxy() bool { return cfg.cf.proxy.String() != proxyFlagOff }
+func (cfg config) isReadonlyProxy() bool { return cfg.cf.proxy.String() == proxyFlagReadonly }
+func (cfg config) shouldFallbackToProxy() bool { return cfg.cf.fallback.String() == fallbackFlagProxy }
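+
+// Hedged summary (not from the original file) of how the proxy-related helpers interact:
+//
+//	--proxy=on        -> isProxy() == true, full read/write v2 proxy
+//	--proxy=readonly  -> isProxy() == true and isReadonlyProxy() == true
+//	--proxy=off       -> isProxy() == false; mayBeProxy() can still be true when
+//	                     --discovery is set together with --discovery-fallback=proxy,
+//	                     in which case the node may fall back to proxy mode.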
diff --git a/server/etcdmain/doc.go b/etcd/etcdmain/doc.go
similarity index 100%
rename from server/etcdmain/doc.go
rename to etcd/etcdmain/doc.go
diff --git a/etcd/etcdmain/etcd.go b/etcd/etcdmain/etcd.go
new file mode 100644
index 00000000000..42415ca7cf7
--- /dev/null
+++ b/etcd/etcdmain/etcd.go
@@ -0,0 +1,445 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/embed"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2discovery"
+ "github.com/ls-2018/etcd_cn/etcd/proxy/httpproxy"
+ pkgioutil "github.com/ls-2018/etcd_cn/pkg/ioutil"
+ "github.com/ls-2018/etcd_cn/pkg/osutil"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+)
+
+// dirType identifies the kind of subdirectory found under the data directory.
+type dirType string
+
+// Only one of the member and proxy directories may exist; if neither exists,
+// the directory is empty (the node has not been run yet).
+var (
+ dirMember = dirType("member")
+ dirProxy = dirType("proxy")
+ dirEmpty = dirType("empty")
+)
+
+func startEtcdOrProxy(args []string) {
+ grpc.EnableTracing = false
+
+ cfg := newConfig()
+ defaultInitialCluster := cfg.ec.InitialCluster
+
+ err := cfg.parse(args[1:])
+ lg := cfg.ec.GetLogger()
+	// If parsing the full configuration failed, prefer the logger resolved from the config to print the error; if that does not exist, create a temporary one.
+ if lg == nil {
+ var zapError error
+ lg, zapError = zap.NewProduction()
+ if zapError != nil {
+			fmt.Printf("failed to create zap logger: %v", zapError)
+ os.Exit(1)
+ }
+ }
+	lg.Info("running etcd", zap.Strings("args", args))
+	if err != nil {
+		lg.Warn("failed to verify flags", zap.Error(err))
+ switch err {
+ case embed.ErrUnsetAdvertiseClientURLsFlag:
+ lg.Warn("advertise client URLs are not set", zap.Error(err))
+ }
+ os.Exit(1)
+ }
+ // err := cfg.ZapLoggerBuilder(cfg)
+ cfg.ec.SetupGlobalLoggers()
+
+ defer func() {
+ logger := cfg.ec.GetLogger()
+ if logger != nil {
+ logger.Sync()
+ }
+ }()
+	// TODO: the exact purpose of this call is unclear; it keeps InitialCluster consistent when Name changes
+ defaultHost, dhErr := (&cfg.ec).UpdateDefaultClusterFromName(defaultInitialCluster)
+ if defaultHost != "" {
+		lg.Info("detected default advertise host", zap.String("host", defaultHost))
+ }
+ if dhErr != nil {
+		lg.Info("failed to detect default host", zap.Error(dhErr))
+ }
+
+ if cfg.ec.Dir == "" {
+ cfg.ec.Dir = fmt.Sprintf("%v.etcd", cfg.ec.Name)
+		lg.Warn("'data-dir' was empty; using default", zap.String("data-dir", cfg.ec.Dir))
+ }
+
+ var stopped <-chan struct{}
+ var errc <-chan error
+	// identify the data directory and return its type
+ which := identifyDataDirOrDie(cfg.ec.GetLogger(), cfg.ec.Dir)
+ if which != dirEmpty {
+		lg.Info("etcd data directory has already been initialized", zap.String("data-dir", cfg.ec.Dir), zap.String("dir-type", string(which)))
+ switch which {
+ case dirMember:
+ stopped, errc, err = startEtcd(&cfg.ec)
+ case dirProxy:
+ err = startProxy(cfg)
+ default:
+			lg.Panic("unknown directory type", zap.String("dir-type", string(which)))
+ }
+ } else {
+		shouldProxy := cfg.isProxy() // whether proxy mode is enabled
+		if !shouldProxy { // usually not enabled
+ stopped, errc, err = startEtcd(&cfg.ec)
+			// TODO: not reviewed yet
+ if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == v2discovery.ErrFullCluster {
+ if cfg.shouldFallbackToProxy() {
+ lg.Warn("discovery cluster is full, falling back to proxy", zap.String("fallback-proxy", fallbackFlagProxy), zap.Error(err))
+ shouldProxy = true
+ }
+ } else if err != nil {
+ lg.Warn("failed to start etcd", zap.Error(err))
+ }
+ }
+ if shouldProxy {
+ err = startProxy(cfg)
+ }
+ }
+
+ if err != nil {
+ if derr, ok := err.(*etcdserver.DiscoveryError); ok {
+ switch derr.Err {
+ case v2discovery.ErrDuplicateID:
+ lg.Warn("member has been registered with discovery service", zap.String("name", cfg.ec.Name), zap.String("discovery-token", cfg.ec.Durl), zap.Error(derr.Err))
+ lg.Warn("but could not find valid cluster configuration", zap.String("data-dir", cfg.ec.Dir))
+ lg.Warn("check data dir if previous bootstrap succeeded")
+ lg.Warn("or use a new discovery token if previous bootstrap failed")
+
+ case v2discovery.ErrDuplicateName:
+ lg.Warn("member with duplicated name has already been registered", zap.String("discovery-token", cfg.ec.Durl), zap.Error(derr.Err))
+ lg.Warn("cURL the discovery token URL for details")
+ lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster")
+
+ default:
+ lg.Warn("failed to bootstrap; discovery token was already used", zap.String("discovery-token", cfg.ec.Durl), zap.Error(err))
+ lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster")
+ }
+ os.Exit(1)
+ }
+
+ if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") {
+ lg.Warn("failed to start", zap.Error(err))
+ if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) {
+ lg.Warn("forgot to set --initial-cluster?")
+ }
+ if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
+ lg.Warn("forgot to set --initial-advertise-peer-urls?")
+ }
+ if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 {
+ lg.Warn("--discovery flag is not set")
+ }
+ os.Exit(1)
+ }
+ lg.Fatal("discovery failed", zap.Error(err))
+ }
+
+ osutil.HandleInterrupts(lg)
+
+ // At this point, the initialization of etcd is done.
+ // The listeners are listening on the TCP ports and ready
+ // for accepting connections. The etcd instance should be
+ // joined with the cluster and ready to serve incoming
+ // connections.
+ notifySystemd(lg)
+
+ select {
+ case lerr := <-errc:
+ // fatal out on listener errors
+ lg.Fatal("listener failed", zap.Error(lerr))
+ case <-stopped:
+ }
+
+ osutil.Exit(0)
+}
+
+// startEtcd launches an embedded etcd server and waits until it is ready or stopped.
+func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
+	e, err := embed.StartEtcd(cfg) // starts etcd (gRPC and HTTP endpoints) asynchronously
+ if err != nil {
+ return nil, nil, err
+ }
+	osutil.RegisterInterruptHandler(e.Close) // register the interrupt handler; it is only invoked on interrupt
+ select {
+	case <-e.Server.ReadyNotify(): // wait until this member has joined the cluster
+	case <-e.Server.StopNotify(): // the server stopped due to an error
+ }
+ return e.Server.StopNotify(), e.Err(), nil
+}
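+
+// Minimal embedding sketch (illustrative; mirrors what startEtcd does for the CLI,
+// the data dir value is a placeholder):
+//
+//	cfg := embed.NewConfig()
+//	cfg.Dir = "default.etcd"
+//	e, err := embed.StartEtcd(cfg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer e.Close()
+//	select {
+//	case <-e.Server.ReadyNotify(): // joined the cluster, ready to serve
+//	case <-e.Server.StopNotify(): // startup aborted
+//	}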
+
+// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
+func startProxy(cfg *config) error {
+ lg := cfg.ec.GetLogger()
+ lg.Info("v2 API proxy starting")
+
+ clientTLSInfo := cfg.ec.ClientTLSInfo
+ if clientTLSInfo.Empty() {
+ // Support old proxy behavior of defaulting to PeerTLSInfo
+ // for both client and peer connections.
+ clientTLSInfo = cfg.ec.PeerTLSInfo
+ }
+ clientTLSInfo.InsecureSkipVerify = cfg.ec.ClientAutoTLS
+ cfg.ec.PeerTLSInfo.InsecureSkipVerify = cfg.ec.PeerAutoTLS
+
+ pt, err := transport.NewTimeoutTransport(
+ clientTLSInfo,
+ time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond,
+ time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond,
+ time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond,
+ )
+ if err != nil {
+ return err
+ }
+ pt.MaxIdleConnsPerHost = httpproxy.DefaultMaxIdleConnsPerHost
+
+ if err = cfg.ec.PeerSelfCert(); err != nil {
+ lg.Fatal("failed to get self-signed certs for peer", zap.Error(err))
+ }
+ tr, err := transport.NewTimeoutTransport(
+ cfg.ec.PeerTLSInfo,
+ time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond,
+ time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond,
+ time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond,
+ )
+ if err != nil {
+ return err
+ }
+
+ cfg.ec.Dir = filepath.Join(cfg.ec.Dir, "proxy")
+ err = fileutil.TouchDirAll(cfg.ec.Dir)
+ if err != nil {
+ return err
+ }
+
+ var peerURLs []string
+ clusterfile := filepath.Join(cfg.ec.Dir, "cluster")
+
+ b, err := ioutil.ReadFile(clusterfile)
+ switch {
+ case err == nil:
+ if cfg.ec.Durl != "" {
+ lg.Warn(
+ "discovery token ignored since the proxy has already been initialized; valid cluster file found",
+ zap.String("cluster-file", clusterfile),
+ )
+ }
+ if cfg.ec.DNSCluster != "" {
+ lg.Warn(
+ "DNS SRV discovery ignored since the proxy has already been initialized; valid cluster file found",
+ zap.String("cluster-file", clusterfile),
+ )
+ }
+ urls := struct{ PeerURLs []string }{}
+ err = json.Unmarshal(b, &urls)
+ if err != nil {
+ return err
+ }
+ peerURLs = urls.PeerURLs
+ lg.Info(
+ "proxy using peer URLS from cluster file",
+ zap.Strings("peer-urls", peerURLs),
+ zap.String("cluster-file", clusterfile),
+ )
+
+ case os.IsNotExist(err):
+ var urlsmap types.URLsMap
+ urlsmap, _, err = cfg.ec.PeerURLsMapAndToken("proxy")
+ if err != nil {
+ return fmt.Errorf("error setting up initial cluster: %v", err)
+ }
+
+ if cfg.ec.Durl != "" {
+ var s string
+ s, err = v2discovery.GetCluster(lg, cfg.ec.Durl, cfg.ec.Dproxy)
+ if err != nil {
+ return err
+ }
+ if urlsmap, err = types.NewURLsMap(s); err != nil {
+ return err
+ }
+ }
+ peerURLs = urlsmap.URLs()
+ lg.Info("proxy using peer URLS", zap.Strings("peer-urls", peerURLs))
+
+ default:
+ return err
+ }
+
+ clientURLs := []string{}
+ uf := func() []string {
+ gcls, gerr := etcdserver.GetClusterFromRemotePeers(lg, peerURLs, tr)
+ if gerr != nil {
+ lg.Warn(
+ "failed to get cluster from remote peers",
+ zap.Strings("peer-urls", peerURLs),
+ zap.Error(gerr),
+ )
+ return []string{}
+ }
+
+ clientURLs = gcls.ClientURLs()
+ urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
+ b, jerr := json.Marshal(urls)
+ if jerr != nil {
+ lg.Warn("proxy failed to marshal peer URLs", zap.Error(jerr))
+ return clientURLs
+ }
+
+ err = pkgioutil.WriteAndSyncFile(clusterfile+".bak", b, 0o600)
+ if err != nil {
+ lg.Warn("proxy failed to write cluster file", zap.Error(err))
+ return clientURLs
+ }
+ err = os.Rename(clusterfile+".bak", clusterfile)
+ if err != nil {
+ lg.Warn(
+ "proxy failed to rename cluster file",
+ zap.String("path", clusterfile),
+ zap.Error(err),
+ )
+ return clientURLs
+ }
+ if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
+ lg.Info(
+ "proxy updated peer URLs",
+ zap.Strings("from", peerURLs),
+ zap.Strings("to", gcls.PeerURLs()),
+ )
+ }
+ peerURLs = gcls.PeerURLs()
+
+ return clientURLs
+ }
+ ph := httpproxy.NewHandler(lg, pt, uf, time.Duration(cfg.cp.ProxyFailureWaitMs)*time.Millisecond, time.Duration(cfg.cp.ProxyRefreshIntervalMs)*time.Millisecond)
+ ph = embed.WrapCORS(cfg.ec.CORS, ph)
+
+ if cfg.isReadonlyProxy() {
+ ph = httpproxy.NewReadonlyHandler(ph)
+ }
+
+ // setup self signed certs when serving https
+ cHosts, cTLS := []string{}, false
+ for _, u := range cfg.ec.LCUrls {
+ cHosts = append(cHosts, u.Host)
+ cTLS = cTLS || u.Scheme == "https"
+ }
+ for _, u := range cfg.ec.ACUrls {
+ cHosts = append(cHosts, u.Host)
+ cTLS = cTLS || u.Scheme == "https"
+ }
+ listenerTLS := cfg.ec.ClientTLSInfo
+ if cfg.ec.ClientAutoTLS && cTLS {
+ listenerTLS, err = transport.SelfCert(cfg.ec.GetLogger(), filepath.Join(cfg.ec.Dir, "clientCerts"), cHosts, cfg.ec.SelfSignedCertValidity)
+ if err != nil {
+ lg.Fatal("failed to initialize self-signed client cert", zap.Error(err))
+ }
+ }
+
+ // Start a proxy etcd goroutine for each listen address
+ for _, u := range cfg.ec.LCUrls {
+ l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS)
+ if err != nil {
+ return err
+ }
+
+ host := u.String()
+ go func() {
+ lg.Info("v2 proxy started listening on client requests", zap.String("host", host))
+ mux := http.NewServeMux()
+ etcdhttp.HandlePrometheus(mux) // v2 proxy just uses the same port
+ mux.Handle("/", ph)
+ lg.Fatal("done serving", zap.Error(http.Serve(l, mux)))
+ }()
+ }
+ return nil
+}
+
+// identifyDataDirOrDie inspects the data directory and returns its type. It exits the process if the data dir is invalid.
+func identifyDataDirOrDie(lg *zap.Logger, dir string) dirType {
+ names, err := fileutil.ReadDir(dir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return dirEmpty
+ }
+		lg.Fatal("failed to list data directory", zap.String("dir", dir), zap.Error(err))
+ }
+
+ var m, p bool
+ for _, name := range names {
+ switch dirType(name) {
+ case dirMember:
+ m = true
+ case dirProxy:
+ p = true
+ default:
+			lg.Warn("found invalid file under data directory", zap.String("filename", name), zap.String("data-dir", dir))
+ }
+ }
+
+ if m && p {
+		lg.Fatal("invalid data directory: both member and proxy directories exist")
+ }
+ if m {
+ return dirMember
+ }
+ if p {
+ return dirProxy
+ }
+ return dirEmpty
+}
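+
+// Illustrative layout (an assumption about a typical data dir) of what
+// identifyDataDirOrDie inspects:
+//
+//	<data-dir>/
+//	  member/   -> dirMember: a bootstrapped etcd member lives here
+//	  proxy/    -> dirProxy:  a v2 proxy was started from this directory
+//	  (neither) -> dirEmpty:  fresh directory, the first start decides the role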
+
+// checkSupportArch verifies that etcd is running on a supported architecture.
+func checkSupportArch() {
+ if runtime.GOARCH == "amd64" ||
+ runtime.GOARCH == "arm64" ||
+ runtime.GOARCH == "ppc64le" ||
+ runtime.GOARCH == "s390x" {
+ return
+ }
+	// unsupported architectures are only configured via an environment variable, so unset it here rather than parsing a flag
+ defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH")
+ if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH {
+		fmt.Printf("running etcd on unsupported architecture %q since ETCD_UNSUPPORTED_ARCH is set\n", env)
+ return
+ }
+
+	fmt.Printf("etcd on unsupported platform without ETCD_UNSUPPORTED_ARCH=%s set\n", runtime.GOARCH)
+ os.Exit(1)
+}
diff --git a/server/etcdmain/gateway.go b/etcd/etcdmain/gateway.go
similarity index 92%
rename from server/etcdmain/gateway.go
rename to etcd/etcdmain/gateway.go
index 64fb90df2c1..830f3e771fd 100644
--- a/server/etcdmain/gateway.go
+++ b/etcd/etcdmain/gateway.go
@@ -21,8 +21,7 @@ import (
"os"
"time"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/server/v3/proxy/tcpproxy"
+ "github.com/ls-2018/etcd_cn/etcd/proxy/tcpproxy"
"github.com/spf13/cobra"
"go.uber.org/zap"
@@ -38,13 +37,11 @@ var (
gatewayCA string
)
-var (
- rootCmd = &cobra.Command{
- Use: "etcd",
- Short: "etcd server",
- SuggestFor: []string{"etcd"},
- }
-)
+var rootCmd = &cobra.Command{
+ Use: "etcd",
+ Short: "etcd etcd",
+ SuggestFor: []string{"etcd"},
+}
func init() {
rootCmd.AddCommand(newGatewayCommand())
@@ -72,7 +69,7 @@ func newGatewayStartCommand() *cobra.Command {
cmd.Flags().StringVar(&gatewayDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster")
cmd.Flags().StringVar(&gatewayDNSClusterServiceName, "discovery-srv-name", "", "service name to query when using DNS discovery")
cmd.Flags().BoolVar(&gatewayInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
- cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client server TLS CA file for verifying the discovered endpoints when discovery-srv is provided.")
+ cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client etcd TLS CA file for verifying the discovered endpoints when discovery-srv is provided.")
cmd.Flags().StringSliceVar(&gatewayEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
@@ -93,7 +90,8 @@ func stripSchema(eps []string) []string {
}
func startGateway(cmd *cobra.Command, args []string) {
- lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+ var lg *zap.Logger
+ lg, err := zap.NewProduction()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
diff --git a/etcd/etcdmain/grpc_proxy.go b/etcd/etcdmain/grpc_proxy.go
new file mode 100644
index 00000000000..a07c59e6dc0
--- /dev/null
+++ b/etcd/etcdmain/grpc_proxy.go
@@ -0,0 +1,493 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "time"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/leasing"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/namespace"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/ordering"
+ "github.com/ls-2018/etcd_cn/etcd/embed"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb"
+ "github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/debugutil"
+ "go.uber.org/zap/zapgrpc"
+
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "github.com/soheilhy/cmux"
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/keepalive"
+)
+
+var (
+ grpcProxyListenAddr string
+ grpcProxyMetricsListenAddr string
+ grpcProxyEndpoints []string
+ grpcProxyDNSCluster string
+ grpcProxyDNSClusterServiceName string
+ grpcProxyInsecureDiscovery bool
+ grpcProxyDataDir string
+ grpcMaxCallSendMsgSize int
+ grpcMaxCallRecvMsgSize int
+
+ // tls for connecting to etcd
+
+ grpcProxyCA string
+ grpcProxyCert string
+ grpcProxyKey string
+ grpcProxyInsecureSkipTLSVerify bool
+
+ // tls for clients connecting to proxy
+
+ grpcProxyListenCA string
+ grpcProxyListenCert string
+ grpcProxyListenKey string
+ grpcProxyListenAutoTLS bool
+ grpcProxyListenCRL string
+ selfSignedCertValidity uint
+
+ grpcProxyAdvertiseClientURL string
+ grpcProxyResolverPrefix string
+ grpcProxyResolverTTL int
+
+ grpcProxyNamespace string
+ grpcProxyLeasing string
+
+ grpcProxyEnablePprof bool
+ grpcProxyEnableOrdering bool
+
+ grpcProxyDebug bool
+
+ // GRPC keep alive related options.
+ grpcKeepAliveMinTime time.Duration
+ grpcKeepAliveTimeout time.Duration
+ grpcKeepAliveInterval time.Duration
+)
+
+const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024
+
+func init() {
+ rootCmd.AddCommand(newGRPCProxyCommand())
+}
+
+// newGRPCProxyCommand returns the cobra command for "grpc-proxy".
+func newGRPCProxyCommand() *cobra.Command {
+ lpc := &cobra.Command{
+ Use: "grpc-proxy ",
+ Short: "grpc-proxy related command",
+ }
+ lpc.AddCommand(newGRPCProxyStartCommand())
+
+ return lpc
+}
+
+func newGRPCProxyStartCommand() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "start",
+ Short: "start the grpc proxy",
+ Run: startGRPCProxy,
+ }
+
+ cmd.Flags().StringVar(&grpcProxyListenAddr, "listen-addr", "127.0.0.1:23790", "listen address")
+ cmd.Flags().StringVar(&grpcProxyDNSCluster, "discovery-srv", "", "domain name to query for SRV records describing cluster endpoints")
+ cmd.Flags().StringVar(&grpcProxyDNSClusterServiceName, "discovery-srv-name", "", "service name to query when using DNS discovery")
+ cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for endpoint /metrics requests on an additional interface")
+ cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
+ cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
+ cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", "127.0.0.1:23790", "advertise address to register (must be reachable by client)")
+ cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)")
+ cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints")
+ cmd.Flags().StringVar(&grpcProxyNamespace, "namespace", "", "string to prefix to all keys for namespacing requests")
+ cmd.Flags().BoolVar(&grpcProxyEnablePprof, "enable-pprof", false, `Enable runtime profiling data via HTTP etcd. Address is at client URL + "/debug/pprof/"`)
+ cmd.Flags().StringVar(&grpcProxyDataDir, "data-dir", "default.proxy", "Data directory for persistent data")
+ cmd.Flags().IntVar(&grpcMaxCallSendMsgSize, "max-send-bytes", defaultGRPCMaxCallSendMsgSize, "message send limits in bytes (default value is 1.5 MiB)")
+ cmd.Flags().IntVar(&grpcMaxCallRecvMsgSize, "max-recv-bytes", math.MaxInt32, "message receive limits in bytes (default value is math.MaxInt32)")
+ cmd.Flags().DurationVar(&grpcKeepAliveMinTime, "grpc-keepalive-min-time", embed.DefaultGRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging proxy.")
+ cmd.Flags().DurationVar(&grpcKeepAliveInterval, "grpc-keepalive-interval", embed.DefaultGRPCKeepAliveInterval, "Frequency duration of etcd-to-client ping to check if a connection is alive (0 to disable).")
+ cmd.Flags().DurationVar(&grpcKeepAliveTimeout, "grpc-keepalive-timeout", embed.DefaultGRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
+
+ // client TLS for connecting to etcd
+ cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file")
+ cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file")
+ cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle")
+	cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates (CAUTION: this option should be enabled only for testing purposes)")
+
+ // client TLS for connecting to proxy
+ cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file")
+ cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file")
+ cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle")
+ cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates")
+ cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.")
+ cmd.Flags().UintVar(&selfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the proxy certificates, unit is year")
+
+ // experimental flags
+ cmd.Flags().BoolVar(&grpcProxyEnableOrdering, "experimental-serializable-ordering", false, "Ensure serializable reads have monotonically increasing store revisions across endpoints.")
+ cmd.Flags().StringVar(&grpcProxyLeasing, "experimental-leasing-prefix", "", "leasing metadata prefix for disconnected linearized reads.")
+
+ cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.")
+
+ return &cmd
+}
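+
+// Illustrative invocation of the command defined above (the endpoint and
+// addresses are example values only):
+//
+//	etcd grpc-proxy start --endpoints=127.0.0.1:2379 \
+//	    --listen-addr=127.0.0.1:23790 \
+//	    --advertise-client-url=127.0.0.1:23790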
+
+func startGRPCProxy(cmd *cobra.Command, args []string) {
+ checkArgs()
+
+ lcfg := logutil.DefaultZapLoggerConfig
+ if grpcProxyDebug {
+ lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
+ grpc.EnableTracing = true
+ }
+
+ lg, err := lcfg.Build()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer lg.Sync()
+
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+
+	// The proxy itself (ListenCert) can have a non-empty CN.
+ // The empty CN is required for grpcProxyCert.
+ // Please see https://github.com/etcd-io/etcd/issues/11970#issuecomment-687875315 for more context.
+ tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey, false)
+
+ if tlsinfo == nil && grpcProxyListenAutoTLS {
+ host := []string{"https://" + grpcProxyListenAddr}
+ dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
+ autoTLS, err := transport.SelfCert(lg, dir, host, selfSignedCertValidity)
+ if err != nil {
+ log.Fatal(err)
+ }
+ tlsinfo = &autoTLS
+ }
+ if tlsinfo != nil {
+ lg.Info("gRPC proxy etcd TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsinfo)))
+ }
+ m := mustListenCMux(lg, tlsinfo)
+ grpcl := m.Match(cmux.HTTP2())
+ defer func() {
+ grpcl.Close()
+ lg.Info("stop listening gRPC proxy client requests", zap.String("address", grpcProxyListenAddr))
+ }()
+
+ client := mustNewClient(lg)
+
+ // The proxy client is used for self-healthchecking.
+ // TODO: The mechanism should be refactored to use internal connection.
+ var proxyClient *clientv3.Client
+ if grpcProxyAdvertiseClientURL != "" {
+ proxyClient = mustNewProxyClient(lg, tlsinfo)
+ }
+
+ srvhttp, httpl := mustHTTPListener(lg, m, tlsinfo, client, proxyClient)
+ errc := make(chan error, 3)
+ go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }()
+ go func() { errc <- srvhttp.Serve(httpl) }()
+ go func() { errc <- m.Serve() }()
+ if len(grpcProxyMetricsListenAddr) > 0 {
+ mhttpl := mustMetricsListener(lg, tlsinfo)
+ go func() {
+ mux := http.NewServeMux()
+ grpcproxy.HandleHealth(lg, mux, client)
+ grpcproxy.HandleProxyHealth(lg, mux, proxyClient)
+ lg.Info("gRPC proxy etcd metrics URL serving")
+ herr := http.Serve(mhttpl, mux)
+ if herr != nil {
+ lg.Fatal("gRPC proxy etcd metrics URL returned", zap.Error(herr))
+ } else {
+ lg.Info("gRPC proxy etcd metrics URL returned")
+ }
+ }()
+ }
+
+ lg.Info("started gRPC proxy", zap.String("address", grpcProxyListenAddr))
+
+ // grpc-proxy is initialized, ready to serve
+ notifySystemd(lg)
+
+ fmt.Fprintln(os.Stderr, <-errc)
+ os.Exit(1)
+}
+
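+// checkArgs validates flag combinations before the proxy starts: --resolver-prefix
+// and --resolver-ttl must be set together with valid values, --advertise-client-url
+// must be non-empty when registration via the resolver prefix is enabled, and
+// --self-signed-cert-validity must be greater than zero when --auto-tls is used.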
+func checkArgs() {
+ if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL < 1 {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-ttl %d", grpcProxyResolverTTL))
+ os.Exit(1)
+ }
+ if grpcProxyResolverPrefix == "" && grpcProxyResolverTTL > 0 {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-prefix %q", grpcProxyResolverPrefix))
+ os.Exit(1)
+ }
+ if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL > 0 && grpcProxyAdvertiseClientURL == "" {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("invalid advertise-client-url %q", grpcProxyAdvertiseClientURL))
+ os.Exit(1)
+ }
+ if grpcProxyListenAutoTLS && selfSignedCertValidity == 0 {
+ fmt.Fprintln(os.Stderr, fmt.Errorf("selfSignedCertValidity is invalid,it should be greater than 0"))
+ os.Exit(1)
+ }
+}
+
+func mustNewClient(lg *zap.Logger) *clientv3.Client {
+ srvs := discoverEndpoints(lg, grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery, grpcProxyDNSClusterServiceName)
+ eps := srvs.Endpoints
+ if len(eps) == 0 {
+ eps = grpcProxyEndpoints
+ }
+ cfg, err := newClientCfg(lg, eps)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ cfg.DialOptions = append(cfg.DialOptions,
+ grpc.WithUnaryInterceptor(grpcproxy.AuthUnaryClientInterceptor))
+ cfg.DialOptions = append(cfg.DialOptions,
+ grpc.WithStreamInterceptor(grpcproxy.AuthStreamClientInterceptor))
+ cfg.Logger = lg.Named("client")
+ client, err := clientv3.New(*cfg)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ return client
+}
+
+func mustNewProxyClient(lg *zap.Logger, tls *transport.TLSInfo) *clientv3.Client {
+ eps := []string{grpcProxyAdvertiseClientURL}
+ cfg, err := newProxyClientCfg(lg.Named("client"), eps, tls)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ client, err := clientv3.New(*cfg)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ lg.Info("create proxy client", zap.String("grpcProxyAdvertiseClientURL", grpcProxyAdvertiseClientURL))
+ return client
+}
+
+func newProxyClientCfg(lg *zap.Logger, eps []string, tls *transport.TLSInfo) (*clientv3.Config, error) {
+ cfg := clientv3.Config{
+ Endpoints: eps,
+ DialTimeout: 5 * time.Second,
+ Logger: lg,
+ }
+ if tls != nil {
+ clientTLS, err := tls.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ cfg.TLS = clientTLS
+ }
+ return &cfg, nil
+}
+
+func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) {
+ // set tls if any one tls option set
+ cfg := clientv3.Config{
+ Endpoints: eps,
+ DialTimeout: 5 * time.Second,
+ }
+
+ if grpcMaxCallSendMsgSize > 0 {
+ cfg.MaxCallSendMsgSize = grpcMaxCallSendMsgSize
+ }
+ if grpcMaxCallRecvMsgSize > 0 {
+ cfg.MaxCallRecvMsgSize = grpcMaxCallRecvMsgSize
+ }
+
+ tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey, true)
+ if tls == nil && grpcProxyInsecureSkipTLSVerify {
+ tls = &transport.TLSInfo{}
+ }
+ if tls != nil {
+ clientTLS, err := tls.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify
+ if clientTLS.InsecureSkipVerify {
+ lg.Warn("--insecure-skip-tls-verify was given, this grpc proxy process skips authentication of etcd etcd TLS certificates. This option should be enabled only for testing purposes.")
+ }
+ cfg.TLS = clientTLS
+ lg.Info("gRPC proxy client TLS", zap.String("tls-info", fmt.Sprintf("%+v", tls)))
+ }
+ return &cfg, nil
+}
+
+func newTLS(ca, cert, key string, requireEmptyCN bool) *transport.TLSInfo {
+ if ca == "" && cert == "" && key == "" {
+ return nil
+ }
+ return &transport.TLSInfo{TrustedCAFile: ca, CertFile: cert, KeyFile: key, EmptyCN: requireEmptyCN}
+}
+
+func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux {
+ l, err := net.Listen("tcp", grpcProxyListenAddr)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ if tlsinfo != nil {
+ tlsinfo.CRLFile = grpcProxyListenCRL
+ if l, err = transport.NewTLSListener(l, tlsinfo); err != nil {
+ lg.Fatal("failed to create TLS listener", zap.Error(err))
+ }
+ }
+
+ lg.Info("listening for gRPC proxy client requests", zap.String("address", grpcProxyListenAddr))
+ return cmux.New(l)
+}
+
+func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
+ if grpcProxyEnableOrdering {
+ vf := ordering.NewOrderViolationSwitchEndpointClosure(client)
+ client.KV = ordering.NewKV(client.KV, vf)
+ lg.Info("waiting for linearized read from cluster to recover ordering")
+ for {
+ _, err := client.KV.Get(context.TODO(), "_", clientv3.WithKeysOnly())
+ if err == nil {
+ break
+ }
+ lg.Warn("ordering recovery failed, retrying in 1s", zap.Error(err))
+ time.Sleep(time.Second)
+ }
+ }
+
+ if len(grpcProxyNamespace) > 0 {
+ client.KV = namespace.NewKV(client.KV, grpcProxyNamespace)
+ client.Watcher = namespace.NewWatcher(client.Watcher, grpcProxyNamespace)
+ client.Lease = namespace.NewLease(client.Lease, grpcProxyNamespace)
+ }
+
+ if len(grpcProxyLeasing) > 0 {
+ client.KV, _, _ = leasing.NewKV(client, grpcProxyLeasing)
+ }
+
+ kvp, _ := grpcproxy.NewKvProxy(client)
+ watchp, _ := grpcproxy.NewWatchProxy(client.Ctx(), lg, client)
+ if grpcProxyResolverPrefix != "" {
+ grpcproxy.Register(lg, client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL)
+ }
+ clusterp, _ := grpcproxy.NewClusterProxy(lg, client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix)
+ leasep, _ := grpcproxy.NewLeaseProxy(client.Ctx(), client)
+
+ mainp := grpcproxy.NewMaintenanceProxy(client)
+ authp := grpcproxy.NewAuthProxy(client)
+ electionp := grpcproxy.NewElectionProxy(client)
+ lockp := grpcproxy.NewLockProxy(client)
+
+ gopts := []grpc.ServerOption{
+ grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+ grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+ grpc.MaxConcurrentStreams(math.MaxUint32),
+ }
+ if grpcKeepAliveMinTime > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: grpcKeepAliveMinTime,
+ PermitWithoutStream: false,
+ }))
+ }
+ if grpcKeepAliveInterval > time.Duration(0) ||
+ grpcKeepAliveTimeout > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: grpcKeepAliveInterval,
+ Timeout: grpcKeepAliveTimeout,
+ }))
+ }
+
+ server := grpc.NewServer(gopts...)
+
+ pb.RegisterKVServer(server, kvp)
+ pb.RegisterWatchServer(server, watchp)
+ pb.RegisterClusterServer(server, clusterp)
+ pb.RegisterLeaseServer(server, leasep)
+ pb.RegisterMaintenanceServer(server, mainp)
+ pb.RegisterAuthServer(server, authp)
+ v3electionpb.RegisterElectionServer(server, electionp)
+ v3lockpb.RegisterLockServer(server, lockp)
+
+ return server
+}
+
+func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client, proxy *clientv3.Client) (*http.Server, net.Listener) {
+ httpmux := http.NewServeMux()
+ httpmux.HandleFunc("/", http.NotFound)
+ grpcproxy.HandleHealth(lg, httpmux, c)
+ grpcproxy.HandleProxyHealth(lg, httpmux, proxy)
+ if grpcProxyEnablePprof {
+ for p, h := range debugutil.PProfHandlers() {
+ httpmux.Handle(p, h)
+ }
+ lg.Info("gRPC proxy enabled pprof", zap.String("path", debugutil.HTTPPrefixPProf))
+ }
+ srvhttp := &http.Server{
+ Handler: httpmux,
+ ErrorLog: log.New(ioutil.Discard, "net/http", 0),
+ }
+
+ if tlsinfo == nil {
+ return srvhttp, m.Match(cmux.HTTP1())
+ }
+
+ srvTLS, err := tlsinfo.ServerConfig()
+ if err != nil {
+ lg.Fatal("failed to set up TLS", zap.Error(err))
+ }
+ srvhttp.TLSConfig = srvTLS
+ return srvhttp, m.Match(cmux.Any())
+}
+
+func mustMetricsListener(lg *zap.Logger, tlsinfo *transport.TLSInfo) net.Listener {
+ murl, err := url.Parse(grpcProxyMetricsListenAddr)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr)
+ os.Exit(1)
+ }
+ ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsinfo)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ lg.Info("gRPC proxy listening for metrics", zap.String("address", murl.String()))
+ return ml
+}
diff --git a/etcd/etcdmain/help.go b/etcd/etcdmain/help.go
new file mode 100644
index 00000000000..540f8e644d0
--- /dev/null
+++ b/etcd/etcdmain/help.go
@@ -0,0 +1,256 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+ "fmt"
+ "strconv"
+
+ cconfig "github.com/ls-2018/etcd_cn/etcd/config"
+ "github.com/ls-2018/etcd_cn/etcd/embed"
+ "golang.org/x/crypto/bcrypt"
+)
+
+var (
+ usageline = `Usage:
+
+ etcd [flags]
+    Start an etcd server.
+
+ etcd --version
+ Show the version of etcd.
+
+ etcd -h | --help
+ Show the help information about etcd.
+
+ etcd --config-file
+ Path to the etcd configuration file. Note that if a configuration file is provided, other command line flags and environment variables will be ignored.
+
+ etcd gateway
+    Start an L4 TCP gateway proxy.
+
+ etcd grpc-proxy
+    Start an L7 gRPC proxy.
+`
+ flagsline = `
+Member:
+  --name 'default'
+    Human-readable name for this member.
+  --data-dir '${name}.etcd'
+    Path to the data directory. Defaults to ${name}.etcd.
+  --wal-dir ''
+    Path to the dedicated WAL directory. Defaults to a directory under --data-dir.
+  --snapshot-count '100000'
+    Number of committed transactions that triggers a snapshot to disk.
+  --heartbeat-interval '100'
+    Heartbeat interval (in milliseconds).
+  --election-timeout '1000'
+    Election timeout (in milliseconds).
+  --initial-election-tick-advance 'true'
+    Whether to fast-forward initial election ticks on boot for faster elections.
+  --listen-peer-urls 'http://localhost:2380'
+    List of URLs to listen on for traffic from other etcd members (peer traffic).
+  --listen-client-urls 'http://localhost:2379'
+    List of URLs to listen on for client traffic.
+  --max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `'
+    Maximum number of snapshot files to retain (0 is unlimited).
+  --max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `'
+    Maximum number of WAL files to retain (0 is unlimited).
+  --quota-backend-bytes '0'
+    Raise alarms when the backend size exceeds the given quota (0 defaults to the low space quota).
+  --backend-bbolt-freelist-type 'map'
+    BackendFreelistType specifies the type of freelist used by the boltdb backend ('array' and 'map' are supported types).
+  --backend-batch-interval ''
+    BackendBatchInterval is the maximum time before committing a backend transaction.
+  --backend-batch-limit '0'
+    BackendBatchLimit is the maximum number of operations before committing a backend transaction.
+  --max-txn-ops '128'
+    Maximum number of operations permitted in a transaction.
+  --max-request-bytes '1572864'
+    Maximum client request size in bytes the server will accept.
+  --grpc-keepalive-min-time '5s'
+    Minimum duration interval that a client should wait before pinging the server.
+  --grpc-keepalive-interval '2h'
+    Frequency duration of server-to-client pings to check if a connection is alive (0 to disable).
+  --grpc-keepalive-timeout '20s'
+    Additional duration of wait before closing a non-responsive connection (0 to disable).
+  --socket-reuse-port 'false'
+    Enable setting the socket option SO_REUSEPORT on listeners, allowing rebinding of a port already in use.
+  --socket-reuse-address 'false'
+    Enable setting the socket option SO_REUSEADDR on listeners, allowing rebinding of a port in TIME_WAIT state.
+
+Clustering:
+  --initial-advertise-peer-urls 'http://localhost:2380'
+    List of this member's peer URLs to advertise to the rest of the cluster.
+  --initial-cluster 'default=http://localhost:2380'
+    Initial cluster configuration for bootstrapping (information about all cluster members).
+  --initial-cluster-state 'new'
+    Initial cluster state ('new' or 'existing').
+  --initial-cluster-token 'etcd-cluster'
+    Initial cluster token for bootstrapping. Must be unique per cluster.
+  --advertise-client-urls 'http://localhost:2379'
+    List of this member's client URLs to advertise to the public.
+    The client URLs advertised should be accessible to machines that talk to etcd cluster. etcd client libraries parse these URLs to connect to the cluster.
+  --discovery ''
+    Discovery URL used to bootstrap the cluster.
+  --discovery-fallback 'proxy'
+    Expected behavior ('exit' or 'proxy') when discovery services fails. 'proxy' supports the v2 API only.
+  --discovery-proxy ''
+    HTTP proxy to use for traffic to the discovery service.
+  --discovery-srv ''
+    DNS srv domain used to bootstrap the cluster.
+  --discovery-srv-name ''
+    Suffix of the DNS srv name queried when bootstrapping using DNS.
+  --strict-reconfig-check '` + strconv.FormatBool(embed.DefaultStrictReconfigCheck) + `'
+    Reject reconfiguration requests that would cause quorum loss.
+ --pre-vote 'true'
+ Enable to run an additional Raft election phase.
+ --auto-compaction-retention '0'
+ Auto compaction retention length. 0 means disable auto compaction.
+ --auto-compaction-mode 'periodic'
+ Interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.
+ --v2-deprecation '` + string(cconfig.V2_DEPR_DEFAULT) + `'
+ Phase of v2store deprecation. Allows to opt-in for higher compatibility mode.
+ Supported values:
+ 'not-yet' // Issues a warning if v2store have meaningful content (default in v3.5)
+ 'write-only' // Custom v2 state is not allowed (planned default in v3.6)
+ 'write-only-drop-data' // Custom v2 state will get DELETED !
+ 'gone' // v2store is not maintained any longer. (planned default in v3.7)
+
+Security:
+  --cert-file ''
+    Path to the client TLS certificate file.
+  --key-file ''
+    Path to the client TLS key file.
+  --client-cert-auth 'false'
+    Enable client certificate authentication; defaults to false.
+  --client-crl-file ''
+    Path to the client certificate revocation list file.
+  --client-cert-allowed-hostname ''
+    Allowed TLS hostname for client certificate authentication.
+  --trusted-ca-file ''
+    Trusted CA certificate file for client-to-etcd communication.
+  --auto-tls 'false'
+    Use generated certificates for client communication; defaults to false.
+  --peer-cert-file ''
+    Path to the peer TLS certificate file.
+  --peer-key-file ''
+    Path to the peer TLS key file.
+  --peer-client-cert-auth 'false'
+    Enable peer client certificate authentication; defaults to false.
+  --peer-trusted-ca-file ''
+    Trusted CA certificate file for peers.
+  --peer-cert-allowed-cn ''
+    Allowed CommonName for peer client certificates.
+  --peer-cert-allowed-hostname ''
+    Allowed hostname for peer client certificates.
+  --peer-auto-tls 'false'
+    Use generated certificates for peer communication; defaults to false.
+  --self-signed-cert-validity '1'
+    Validity period, in years, of the client and peer certificates auto-generated by etcd when ClientAutoTLS and PeerAutoTLS are specified.
+  --peer-crl-file ''
+    Path to the peer certificate revocation list file.
+  --cipher-suites ''
+    Comma-separated list of supported TLS cipher suites between clients/servers (empty will be auto-populated by Go).
+ --cors '*'
+ Comma-separated whitelist of origins for CORS, or cross-origin resource sharing, (empty or * means allow all).
+ --host-whitelist '*'
+ Acceptable hostnames from HTTP client requests, if etcd is not secure (empty or * means allow all).
+
+Auth:
+  --auth-token 'simple'
+    Specify the auth token implementation ('simple' or 'jwt').
+  --bcrypt-cost ` + fmt.Sprintf("%d", bcrypt.DefaultCost) + `
+    Specify the cost/strength of the bcrypt algorithm for hashing auth passwords. Valid values are between 4 and 31.
+  --auth-token-ttl 300
+    Auth token expiration time, in seconds.
+
+Profiling and Monitoring:
+  --enable-pprof 'false'
+    Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/".
+  --metrics 'basic'
+    Set the level of detail for exported metrics; specify 'extensive' to include histogram metrics ('extensive' or 'basic').
+ --listen-metrics-urls ''
+ List of URLs to listen on for the metrics and health endpoints.
+
+Logging:
+ --logger 'zap'
+ Currently only supports 'zap' for structured logging.
+  --log-outputs 'default'
+    Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or a comma-separated list of output targets.
+  --log-level 'info'
+    Log level; only supports debug, info, warn, error, panic, or fatal. Default 'info'.
+  --enable-log-rotation 'false'
+    Enable log rotation of a single log output file target.
+  --log-rotation-config-json '{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}'
+    Default configuration for log rotation. Log rotation is disabled by default. MaxSize(MB), MaxAge(days, 0=no limit), MaxBackups(0=no limit), LocalTime(use the computer's local time), Compress(gzip).
+
+Experimental distributed tracing:
+ --experimental-enable-distributed-tracing 'false'
+ Enable experimental distributed tracing.
+ --experimental-distributed-tracing-address 'localhost:4317'
+ Distributed tracing collector address.
+ --experimental-distributed-tracing-service-name 'etcd'
+    Distributed tracing service name; must be the same across all etcd instances.
+ --experimental-distributed-tracing-instance-id ''
+    Distributed tracing instance ID; must be unique per etcd instance.
+
+v2 Proxy (to be deprecated in v3.6):
+  --proxy 'off'
+    Proxy mode setting ('off', 'readonly' or 'on').
+  --proxy-failure-wait 5000
+    Time (in milliseconds) an endpoint will be held in a failed state before being reconsidered for proxied requests.
+  --proxy-refresh-interval 30000
+    Time (in milliseconds) of the endpoints refresh interval.
+  --proxy-dial-timeout 1000
+    Time (in milliseconds) for a dial to timeout, or 0 to disable the timeout.
+  --proxy-write-timeout 5000
+    Time (in milliseconds) for a write to timeout, or 0 to disable the timeout.
+  --proxy-read-timeout 0
+    Time (in milliseconds) for a read to timeout, or 0 to disable the timeout.
+
+Experimental feature:
+ --experimental-initial-corrupt-check 'false'
+ Enable to check data corruption before serving any client/peer traffic.
+ --experimental-corrupt-check-time '0s'
+ Duration of time between cluster corruption check passes.
+ --experimental-enable-v2v3 ''
+ Serve v2 requests through the v3 backend under a given prefix. Deprecated and to be decommissioned in v3.6.
+ --experimental-enable-lease-checkpoint 'false'
+ ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
+ --experimental-compaction-batch-limit 1000
+ ExperimentalCompactionBatchLimit sets the maximum revisions deleted in each compaction batch.
+ --experimental-peer-skip-client-san-verification 'false'
+    Skip verification of the SAN field in peer client certificates. Defaults to false.
+ --experimental-watch-progress-notify-interval '10m'
+ Duration of periodic watch progress notifications.
+ --experimental-warning-apply-duration '100ms'
+    A warning is generated if applying a request takes longer than this duration.
+ --experimental-txn-mode-write-with-shared-buffer 'true'
+    Enable write transactions to use a shared buffer in their read-only check operations.
+ --experimental-bootstrap-defrag-threshold-megabytes
+    Enable defragmentation during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to a non-zero value to take effect.
+
+Unsafe feature:
+  --force-new-cluster 'false'
+    Force to create a new one-member cluster. It commits configuration changes forcing to remove all existing members in the cluster and add itself. Set this when restoring from a backup.
+  --unsafe-no-fsync 'false'
+    Disables fsync; unsafe, and will cause data loss.
+
+Be CAUTIOUS with unsafe flags! They may break the guarantees given by the consensus protocol!
+`
+)
+
+// Add back "TO BE DEPRECATED" section if needed
diff --git a/etcd/etcdmain/main.go b/etcd/etcdmain/main.go
new file mode 100644
index 00000000000..9399620a594
--- /dev/null
+++ b/etcd/etcdmain/main.go
@@ -0,0 +1,54 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/coreos/go-systemd/v22/daemon"
+ "go.uber.org/zap"
+)
+
+func Main(args []string) {
+	checkSupportArch() // check whether the system architecture is supported
+
+ if len(args) > 1 {
+ cmd := args[1]
+ switch cmd {
+ case "gateway", "grpc-proxy":
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+ return
+ }
+ }
+
+ startEtcdOrProxy(args)
+}
+
+func notifySystemd(lg *zap.Logger) {
+ if lg == nil {
+ lg = zap.NewExample()
+ }
+ lg.Info("通知init守护进程")
+ _, err := daemon.SdNotify(false, daemon.SdNotifyReady)
+ if err != nil {
+ lg.Error("未能通知 systemd 准备就绪", zap.Error(err))
+ return
+ }
+ lg.Info("成功地通知了init守护程序")
+}
diff --git a/etcd/etcdmain/util.go b/etcd/etcdmain/util.go
new file mode 100644
index 00000000000..68b42ebeb57
--- /dev/null
+++ b/etcd/etcdmain/util.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdmain
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/srv"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+
+ "go.uber.org/zap"
+)
+
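+// discoverEndpoints resolves client endpoints for the given domain from DNS SRV
+// records via srv.GetClient with the "etcd-client" service name. Unless insecure
+// discovery is requested, each discovered endpoint is additionally validated
+// against the provided CA before being returned together with its SRV record.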
+func discoverEndpoints(lg *zap.Logger, dns string, ca string, insecure bool, serviceName string) (s srv.SRVClients) {
+ if dns == "" {
+ return s
+ }
+ srvs, err := srv.GetClient("etcd-client", dns, serviceName)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ endpoints := srvs.Endpoints
+
+ if lg != nil {
+ lg.Info(
+ "discovered cluster from SRV",
+ zap.String("srv-etcd", dns),
+ zap.Strings("endpoints", endpoints),
+ )
+ }
+
+ if insecure {
+ return *srvs
+ }
+ // confirm TLS connections are good
+ tlsInfo := transport.TLSInfo{
+ TrustedCAFile: ca,
+ ServerName: dns,
+ }
+
+ if lg != nil {
+ lg.Info(
+ "validating discovered SRV endpoints",
+ zap.String("srv-etcd", dns),
+ zap.Strings("endpoints", endpoints),
+ )
+ }
+
+ endpoints, err = transport.ValidateSecureEndpoints(tlsInfo, endpoints)
+ if err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to validate discovered endpoints",
+ zap.String("srv-etcd", dns),
+ zap.Strings("endpoints", endpoints),
+ zap.Error(err),
+ )
+ }
+ } else {
+ if lg != nil {
+ lg.Info(
+ "using validated discovered SRV endpoints",
+ zap.String("srv-etcd", dns),
+ zap.Strings("endpoints", endpoints),
+ )
+ }
+ }
+
+ // map endpoints back to SRVClients struct with SRV data
+ eps := make(map[string]struct{})
+ for _, ep := range endpoints {
+ eps[ep] = struct{}{}
+ }
+ for i := range srvs.Endpoints {
+ if _, ok := eps[srvs.Endpoints[i]]; !ok {
+ continue
+ }
+ s.Endpoints = append(s.Endpoints, srvs.Endpoints[i])
+ s.SRVs = append(s.SRVs, srvs.SRVs[i])
+ }
+
+ return s
+}
diff --git a/etcd/etcdserver/api/capability.go b/etcd/etcdserver/api/capability.go
new file mode 100644
index 00000000000..95bec6aef4c
--- /dev/null
+++ b/etcd/etcdserver/api/capability.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "sync"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "go.uber.org/zap"
+)
+
+type Capability string
+
+const (
+ AuthCapability Capability = "auth"
+ V3rpcCapability Capability = "v3rpc"
+)
+
+var (
+ // capabilityMaps is a static map of version to capability map.
+ capabilityMaps = map[string]map[Capability]bool{
+ "3.0.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.1.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.2.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.3.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.4.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.5.0": {AuthCapability: true, V3rpcCapability: true},
+ }
+
+ enableMapMu sync.RWMutex
+ // enabledMap points to a map in capabilityMaps
+ enabledMap map[Capability]bool
+
+ curVersion *semver.Version
+)
+
+func init() {
+ enabledMap = map[Capability]bool{
+ AuthCapability: true, // auth
+ V3rpcCapability: true, // v3rpc
+ }
+}
+
+// UpdateCapability updates enabledMap when the cluster version increases.
+func UpdateCapability(lg *zap.Logger, v *semver.Version) {
+ if v == nil {
+ // if recovered but version was never set by cluster
+ return
+ }
+ enableMapMu.Lock()
+ if curVersion != nil && !membership.IsValidVersionChange(v, curVersion) {
+ enableMapMu.Unlock()
+ return
+ }
+ curVersion = v
+ enabledMap = capabilityMaps[curVersion.String()]
+ enableMapMu.Unlock()
+
+ if lg != nil {
+ lg.Info(
+ "enabled capabilities for version",
+ zap.String("cluster-version", version.Cluster(v.String())),
+ )
+ }
+}
+
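+// IsCapabilityEnabled reports whether the given capability is enabled for the
+// current cluster version. Illustrative use (a sketch, not from the original
+// source): an RPC handler can gate v3 features with
+//
+//	if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+//		// reject the request or fall back to a v2 code path
+//	}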
+func IsCapabilityEnabled(c Capability) bool {
+ enableMapMu.RLock()
+ defer enableMapMu.RUnlock()
+ if enabledMap == nil {
+ return false
+ }
+ return enabledMap[c]
+}
diff --git a/etcd/etcdserver/api/etcdhttp/a.go b/etcd/etcdserver/api/etcdhttp/a.go
new file mode 100644
index 00000000000..b764db7ba9d
--- /dev/null
+++ b/etcd/etcdserver/api/etcdhttp/a.go
@@ -0,0 +1,205 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/raft"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "go.uber.org/zap"
+)
+
+const (
+ PathMetrics = "/metrics"
+ PathHealth = "/health"
+ PathProxyMetrics = "/proxy/metrics"
+ PathProxyHealth = "/proxy/health"
+)
+
+// HandleMetricsHealth registers metrics and health handlers.
+func HandleMetricsHealth(lg *zap.Logger, mux *http.ServeMux, srv etcdserver.ServerV2) {
+ mux.Handle(PathMetrics, promhttp.Handler())
+ mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health { return checkV2Health(lg, srv, excludedAlarms) }))
+}
+
+// HandleMetricsHealthForV3 registers metrics and health handlers. it checks health by using v3 range request
+// and its corresponding timeout.
+func HandleMetricsHealthForV3(lg *zap.Logger, mux *http.ServeMux, srv *etcdserver.EtcdServer) {
+ mux.Handle(PathMetrics, promhttp.Handler())
+ mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health {
+ return checkV3Health(lg, srv, excludedAlarms, true)
+ }))
+}
+
+// HandlePrometheus registers prometheus handler on '/metrics'.
+func HandlePrometheus(mux *http.ServeMux) {
+ mux.Handle(PathMetrics, promhttp.Handler())
+}
+
+// NewHealthHandler handles '/health' requests.
+func NewHealthHandler(lg *zap.Logger, hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ lg.Warn("/health error", zap.Int("status-code", http.StatusMethodNotAllowed))
+ return
+ }
+ excludedAlarms := getExcludedAlarms(r)
+ // Passing the query parameter "serializable=true" ensures that the
+ // health of the local etcd is checked vs the health of the cluster.
+ // This is useful for probes attempting to validate the liveness of
+ // the etcd process vs readiness of the cluster to serve requests.
+ // serializableFlag := getSerializableFlag(r)
+ h := hfunc(excludedAlarms)
+ defer func() {
+ if h.Health == "true" {
+ healthSuccess.Inc()
+ } else {
+ healthFailed.Inc()
+ }
+ }()
+ d, _ := json.Marshal(h)
+ if h.Health != "true" {
+ http.Error(w, string(d), http.StatusServiceUnavailable)
+ lg.Warn("/health error", zap.String("output", string(d)), zap.Int("status-code", http.StatusServiceUnavailable))
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ w.Write(d)
+ lg.Debug("/health OK", zap.Int("status-code", http.StatusOK))
+ }
+}
+
+var (
+ healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_success",
+ Help: "The total number of successful health checks",
+ })
+ healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_failures",
+ Help: "The total number of failed health checks",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(healthSuccess)
+ prometheus.MustRegister(healthFailed)
+}
+
+// Health defines etcd server health status.
+// TODO: remove manual parsing in etcdctl cluster-health
+type Health struct {
+ Health string `json:"health"`
+ Reason string `json:"reason"`
+}
+
+type AlarmSet map[string]struct{}
+
+func getExcludedAlarms(r *http.Request) (alarms AlarmSet) {
+ alarms = make(map[string]struct{}, 2)
+ alms, found := r.URL.Query()["exclude"]
+ if found {
+ for _, alm := range alms {
+ if len(alm) == 0 {
+ continue
+ }
+ alarms[alm] = struct{}{}
+ }
+ }
+ return alarms
+}
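+
+// Illustrative probe (example address): a liveness check can ignore a known
+// alarm via the "exclude" query parameter, e.g.
+//
+//	curl "http://127.0.0.1:2379/health?exclude=NOSPACE"
+//
+// checkHealth below then skips the NOSPACE alarm but still reports any other
+// alarms and a missing raft leader.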
+
+func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet, serializable bool) Health {
+ h := Health{}
+ h.Health = "true"
+ as := srv.Alarms()
+ if len(as) > 0 {
+ for _, v := range as {
+ alarmName := v.Alarm.String()
+ if _, found := excludedAlarms[alarmName]; found {
+ lg.Debug("/health excluded alarm", zap.String("alarm", v.String()))
+ continue
+ }
+
+ h.Health = "false"
+ switch v.Alarm {
+ case etcdserverpb.AlarmType_NOSPACE:
+ h.Reason = "ALARM NOSPACE"
+ case etcdserverpb.AlarmType_CORRUPT:
+ h.Reason = "ALARM CORRUPT"
+ default:
+ h.Reason = "ALARM UNKNOWN"
+ }
+ lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String()))
+ return h
+ }
+ }
+
+ if !serializable && (uint64(srv.Leader()) == raft.None) {
+ h.Health = "false"
+ h.Reason = "RAFT NO LEADER"
+ lg.Warn("serving /health false; no leader")
+ return h
+ }
+ return h
+}
+
+func checkV2Health(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet) (h Health) {
+ if h = checkHealth(lg, srv, excludedAlarms, false); h.Health != "true" {
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ _, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
+ cancel()
+ if err != nil {
+ h.Health = "false"
+ h.Reason = fmt.Sprintf("QGET ERROR:%s", err)
+ lg.Warn("serving /health false; QGET fails", zap.Error(err))
+ return
+ }
+ lg.Debug("serving /health true")
+ return
+}
+
+func checkV3Health(lg *zap.Logger, srv *etcdserver.EtcdServer, excludedAlarms AlarmSet, serializable bool) (h Health) {
+ if h = checkHealth(lg, srv, excludedAlarms, serializable); h.Health != "true" {
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), srv.Cfg.ReqTimeout())
+ _, err := srv.Range(ctx, &etcdserverpb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable})
+ cancel()
+ if err != nil && err != auth.ErrUserEmpty && err != auth.ErrPermissionDenied {
+ h.Health = "false"
+ h.Reason = fmt.Sprintf("RANGE ERROR:%s", err)
+ lg.Warn("serving /health false; Range fails", zap.Error(err))
+ return
+ }
+ lg.Debug("serving /health true")
+ return
+}
diff --git a/etcd/etcdserver/api/etcdhttp/api.go b/etcd/etcdserver/api/etcdhttp/api.go
new file mode 100644
index 00000000000..d60c097c2e7
--- /dev/null
+++ b/etcd/etcdserver/api/etcdhttp/api.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "net/http"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp"
+ "github.com/ls-2018/etcd_cn/etcd/lease/leasehttp"
+
+ "go.uber.org/zap"
+)
+
+const (
+ peerMembersPath = "/members"
+ peerMemberPromotePrefix = "/members/promote/"
+)
+
+// NewPeerHandler generates an http.Handler that serves etcd peer (member-to-member) requests.
+func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeerV2) http.Handler {
+ return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler(), s.HashKVHandler(), s.DowngradeEnabledHandler())
+}
+
+func newPeerHandler(lg *zap.Logger, s etcdserver.Server, raftHandler http.Handler,
+ leaseHandler http.Handler, hashKVHandler http.Handler, downgradeEnabledHandler http.Handler,
+) http.Handler {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ peerMembersHandler := newPeerMembersHandler(lg, s.Cluster()) // ✅
+ peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s) // ✅
+
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", http.NotFound)
+ mux.Handle(rafthttp.RaftPrefix, raftHandler) // /raft
+ mux.Handle(rafthttp.RaftPrefix+"/", raftHandler) //
+ mux.Handle(peerMembersPath, peerMembersHandler) // /members
+ mux.Handle(peerMemberPromotePrefix, peerMemberPromoteHandler) // /members/promote
+ if leaseHandler != nil {
+ mux.Handle(leasehttp.LeasePrefix, leaseHandler) // /leases
+ mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler) // /leases/internal
+ }
+ if downgradeEnabledHandler != nil {
+ mux.Handle(etcdserver.DowngradeEnabledPath, downgradeEnabledHandler) // /downgrade/enabled
+ }
+ if hashKVHandler != nil {
+ mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler) // /members/hashkv
+ }
+ mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion))
+ return mux
+}
diff --git a/etcd/etcdserver/api/etcdhttp/over_base.go b/etcd/etcdserver/api/etcdhttp/over_base.go
new file mode 100644
index 00000000000..e1fe8f338e7
--- /dev/null
+++ b/etcd/etcdserver/api/etcdhttp/over_base.go
@@ -0,0 +1,144 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "encoding/json"
+ "expvar"
+ "fmt"
+ "net/http"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "go.uber.org/zap"
+)
+
+const (
+ varsPath = "/debug/vars"
+ versionPath = "/version"
+)
+
+// HandleBasic adds handlers to a mux for serving JSON etcd client requests that do not access the v2 store.
+func HandleBasic(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerPeer) {
+ mux.HandleFunc(varsPath, serveVars)
+ mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) // {"etcdserver":"3.5.2","etcdcluster":"3.5.0"}
+}
+
+// ok
+func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+ vs := version.Versions{
+ Server: version.Version,
+ Cluster: clusterV,
+ }
+ // clusterV = server.Cluster().String()
+ // {"etcdserver":"3.5.2","etcdcluster":"3.5.0"}
+
+ w.Header().Set("Content-Type", "application/json")
+ b, err := json.Marshal(&vs)
+ if err != nil {
+ panic(fmt.Sprintf("序列化失败 (%v)", err))
+ }
+ w.Write(b)
+}
+
+// ok
+func serveVars(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprintf(w, "{\n")
+ first := true
+	expvar.Do(func(kv expvar.KeyValue) { // only one request can be executing at a time
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
+
+// ok
+func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
+ if m == r.Method {
+ return true
+ }
+ w.Header().Set("Allow", m)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return false
+}
+
+func WriteError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
+ if err == nil {
+ return
+ }
+ switch e := err.(type) {
+ case *v2error.Error:
+ e.WriteTo(w)
+
+ case *httptypes.HTTPError:
+ if et := e.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "写失败 v2 HTTP",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-etcd-error", e.Error()),
+ zap.Error(et),
+ )
+ }
+ }
+
+ default:
+ switch err {
+ case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers,
+ etcdserver.ErrUnhealthy:
+ if lg != nil {
+ lg.Warn(
+ "v2 响应错误",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-etcd-error", err.Error()),
+ )
+ }
+
+ default:
+ if lg != nil {
+ lg.Warn(
+ "未知的v2响应错误",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-etcd-error", err.Error()),
+ )
+ }
+ }
+
+ herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
+ if et := herr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "写失败 v2 HTTP",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-etcd-error", err.Error()),
+ zap.Error(et),
+ )
+ }
+ }
+ }
+}
diff --git a/etcd/etcdserver/api/etcdhttp/over_member_api.go b/etcd/etcdserver/api/etcdhttp/over_member_api.go
new file mode 100644
index 00000000000..2558b8eaed5
--- /dev/null
+++ b/etcd/etcdserver/api/etcdhttp/over_member_api.go
@@ -0,0 +1,33 @@
+package etcdhttp
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "go.uber.org/zap"
+)
+
+func newPeerMembersHandler(lg *zap.Logger, cluster api.Cluster) http.Handler {
+ return &peerMembersHandler{
+ lg: lg,
+ cluster: cluster,
+ }
+}
+
+func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if r.URL.Path != peerMembersPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+ ms := h.cluster.Members()
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(ms); err != nil {
+ h.lg.Warn("编码成员信息失败", zap.Error(err))
+ }
+}
diff --git a/etcd/etcdserver/api/etcdhttp/over_promoter_member_api.go b/etcd/etcdserver/api/etcdhttp/over_promoter_member_api.go
new file mode 100644
index 00000000000..ca091d4ba3d
--- /dev/null
+++ b/etcd/etcdserver/api/etcdhttp/over_promoter_member_api.go
@@ -0,0 +1,74 @@
+package etcdhttp
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "go.uber.org/zap"
+)
+
+type peerMembersHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+}
+
+func newPeerMemberPromoteHandler(lg *zap.Logger, s etcdserver.Server) http.Handler {
+ return &peerMemberPromoteHandler{
+ lg: lg,
+ cluster: s.Cluster(),
+ server: s,
+ }
+}
+
+type peerMemberPromoteHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+ server etcdserver.Server
+}
+
+func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "POST") {
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if !strings.HasPrefix(r.URL.Path, peerMemberPromotePrefix) {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+ idStr := strings.TrimPrefix(r.URL.Path, peerMemberPromotePrefix)
+ id, err := strconv.ParseUint(idStr, 10, 64)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("成员 %s 不在集群中", idStr), http.StatusNotFound)
+ return
+ }
+
+ resp, err := h.server.PromoteMember(r.Context(), id)
+ if err != nil {
+ switch err {
+ case membership.ErrIDNotFound:
+ http.Error(w, err.Error(), http.StatusNotFound)
+ case membership.ErrMemberNotLearner:
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ case etcdserver.ErrLearnerNotReady:
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ default:
+ WriteError(h.lg, w, r, err)
+ }
+ h.lg.Warn("提升成员失败", zap.String("member-id", types.ID(id).String()), zap.Error(err))
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ if err := json.NewEncoder(w).Encode(resp); err != nil {
+ h.lg.Warn("编码成员信息失败", zap.Error(err))
+ }
+}
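+
+// Illustrative request (example peer address): a learner is promoted by POSTing
+// its decimal member ID to the peer endpoint, e.g.
+//
+//	curl -X POST http://127.0.0.1:2380/members/promote/1234567890123456789
+//
+// The handler responds with 404 if the ID is unknown and 412 if the member is
+// not a learner or is not yet ready for promotion.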
diff --git a/etcd/etcdserver/api/etcdhttp/over_version_api.go b/etcd/etcdserver/api/etcdhttp/over_version_api.go
new file mode 100644
index 00000000000..f9dba99935e
--- /dev/null
+++ b/etcd/etcdserver/api/etcdhttp/over_version_api.go
@@ -0,0 +1,19 @@
+package etcdhttp
+
+import (
+ "net/http"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+)
+
+// ok
+func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ v := c.Version()
+ if v != nil {
+ fn(w, r, v.String())
+ } else {
+ fn(w, r, "not_decided")
+ }
+ }
+}
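+
+// Illustrative request (example address):
+//
+//	curl http://127.0.0.1:2379/version
+//	// {"etcdserver":"3.5.2","etcdcluster":"3.5.0"}
+//
+// When the cluster version has not been decided yet, the cluster field is
+// reported as "not_decided".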
diff --git a/etcd/etcdserver/api/membership/errors.go b/etcd/etcdserver/api/membership/errors.go
new file mode 100644
index 00000000000..fb6add09d01
--- /dev/null
+++ b/etcd/etcdserver/api/membership/errors.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "errors"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+)
+
+var (
+ ErrIDRemoved = errors.New("membership: ID 已移除")
+ ErrIDExists = errors.New("membership: ID 存在")
+ ErrIDNotFound = errors.New("membership: ID 没有找到")
+ ErrPeerURLexists = errors.New("membership: peerURL 已存在")
+ ErrMemberNotLearner = errors.New("membership: 只能提升一个learner成员")
+ ErrTooManyLearners = errors.New("membership: 集群中成员太多")
+)
+
+func isKeyNotFound(err error) bool {
+ e, ok := err.(*v2error.Error)
+ return ok && e.ErrorCode == v2error.EcodeKeyNotFound
+}
diff --git a/etcd/etcdserver/api/membership/over_cluster.go b/etcd/etcdserver/api/membership/over_cluster.go
new file mode 100644
index 00000000000..8eaf6be6a53
--- /dev/null
+++ b/etcd/etcdserver/api/membership/over_cluster.go
@@ -0,0 +1,661 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha1"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "github.com/ls-2018/etcd_cn/pkg/netutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+const maxLearners = 1
+
+// RaftCluster holds the membership of the raft cluster.
+type RaftCluster struct {
+	lg      *zap.Logger
+	localID types.ID      // ID of the local member
+	cid     types.ID      // cluster ID, derived from a hash of all initial member IDs
+	v2store v2store.Store // in-memory tree-like node store
+	be      backend.Backend
+	sync.Mutex // guards the fields below
+	version *semver.Version
+	members map[types.ID]*Member
+	removed map[types.ID]bool // IDs of removed members; a removed ID cannot be reused
+	downgradeInfo *DowngradeInfo // downgrade information
+}
+
+type ConfigChangeContext struct {
+ Member
+ IsPromote bool `json:"isPromote"` // 是否提升learner
+}
+
+type ShouldApplyV3 bool
+
+const (
+ ApplyBoth = ShouldApplyV3(true)
+ ApplyV2storeOnly = ShouldApplyV3(false)
+)
+
+// Recover is called after a snapshot has been received.
+func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.v2store != nil {
+ c.version = clusterVersionFromStore(c.lg, c.v2store)
+ c.members, c.removed = membersFromStore(c.lg, c.v2store)
+ } else {
+ c.version = clusterVersionFromBackend(c.lg, c.be)
+ c.members, c.removed = membersFromBackend(c.lg, c.be)
+ }
+
+ if c.be != nil {
+ c.downgradeInfo = downgradeInfoFromBackend(c.lg, c.be)
+ }
+ d := &DowngradeInfo{Enabled: false}
+ if c.downgradeInfo != nil {
+ d = &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion}
+ }
+	mustDetectDowngrade(c.lg, c.version, d) // detect version downgrade
+	onSet(c.lg, c.version)
+
+	for _, m := range c.members {
+		c.lg.Info(
+			"recovered/added member from store", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("recovered-remote-peer-id", m.ID.String()), zap.Strings("recovered-remote-peer-urls", m.PeerURLs),
+		)
+	}
+	if c.version != nil {
+		c.lg.Info("set cluster version from store", zap.String("cluster-version", version.Cluster(c.version.String())))
+ }
+}
+
+// NewClusterFromMembers builds a RaftCluster from member information obtained from remote peers.
+func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCluster {
+ c := NewCluster(lg)
+ c.cid = id
+ for _, m := range membs {
+ c.members[m.ID] = m
+ }
+ return c
+}
+
+// UpdateAttributes updates the attributes of the member with the given ID.
+func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ if m, ok := c.members[id]; ok {
+ m.Attributes = attr
+ if c.v2store != nil {
+ mustUpdateMemberAttrInStore(c.lg, c.v2store, m)
+ }
+ if c.be != nil && shouldApplyV3 {
+ unsafeSaveMemberToBackend(c.lg, c.be, m)
+ }
+ return
+ }
+
+ _, ok := c.removed[id]
+ if !ok {
+ c.lg.Panic("更新失败", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("unknown-remote-peer-id", id.String()))
+ }
+
+ c.lg.Warn("移除的成员 不进行属性更新", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("updated-peer-id", id.String()))
+}
+
+// ValidateClusterAndAssignIDs validates the local cluster against the existing cluster by matching PeerURLs. If the validation succeeds, it assigns the IDs of the existing cluster to the local cluster.
+// If the validation fails, an error is returned.
+func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *RaftCluster) error {
+ ems := existing.Members()
+ lms := local.Members()
+ if len(ems) != len(lms) {
+ return fmt.Errorf("成员数量不一致")
+ }
+
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+ for i := range ems {
+ var err error
+ ok := false
+ for j := range lms {
+ if ok, err = netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[j].PeerURLs); ok {
+ lms[j].ID = ems[i].ID
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("PeerURLs: 没有找到匹配的现有成员(%v, %v),最后的解析器错误(%v).", ems[i].ID, ems[i].PeerURLs, err)
+ }
+ }
+ local.members = make(map[types.ID]*Member)
+ for _, m := range lms {
+ local.members[m.ID] = m
+ }
+ return nil
+}
+
+func (c *RaftCluster) ID() types.ID { return c.cid }
+
+func (c *RaftCluster) Members() []*Member {
+ c.Lock()
+ defer c.Unlock()
+ var ms MembersByID
+ for _, m := range c.members {
+ ms = append(ms, m.Clone())
+ }
+ sort.Sort(ms)
+ return []*Member(ms)
+}
+
+// Member ok
+func (c *RaftCluster) Member(id types.ID) *Member {
+ c.Lock()
+ defer c.Unlock()
+ return c.members[id].Clone()
+}
+
+// membersFromStore fetches all cluster members from the v2 store.
+func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) {
+	members := make(map[types.ID]*Member)
+	removed := make(map[types.ID]bool)
+	e, err := st.Get(StoreMembersPrefix, true, true) // get the /0/members event
+	if err != nil {
+		if isKeyNotFound(err) { // the /0/members node does not exist
+			return members, removed
+		}
+		lg.Panic("failed to get members from store", zap.String("path", StoreMembersPrefix), zap.Error(err))
+ }
+ for _, n := range e.NodeExtern.ExternNodes {
+ var m *Member
+ m, err = nodeToMember(lg, n)
+ if err != nil {
+ lg.Panic("node--->member失败", zap.Error(err))
+ }
+ members[m.ID] = m
+ }
+
+	e, err = st.Get(storeRemovedMembersPrefix, true, true) // get the /0/removed_members event
+ if err != nil {
+ if isKeyNotFound(err) {
+ return members, removed
+ }
+ lg.Panic("从store中获取移除节点失败", zap.String("path", storeRemovedMembersPrefix), zap.Error(err))
+ }
+ for _, n := range e.NodeExtern.ExternNodes {
+ removed[MustParseMemberIDFromKey(lg, n.Key)] = true
+ }
+ return members, removed
+}
+
+func (c *RaftCluster) IsIDRemoved(id types.ID) bool {
+ c.Lock()
+ defer c.Unlock()
+ return c.removed[id]
+}
+
+func (c *RaftCluster) String() string {
+ c.Lock()
+ defer c.Unlock()
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "{ClusterID:%s ", c.cid)
+ var ms []string
+ for _, m := range c.members {
+ ms = append(ms, fmt.Sprintf("%+v", m))
+ }
+ fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
+ var ids []string
+ for id := range c.removed {
+ ids = append(ids, id.String())
+ }
+ fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
+ return b.String()
+}
+
+// genID generates the cluster ID from the IDs of all members.
+func (c *RaftCluster) genID() {
+	mIDs := c.MemberIDs() // all member IDs, sorted
+	b := make([]byte, 8*len(mIDs))
+	// [id, id, id, ...] encoded back to back in big-endian order
+ for i, id := range mIDs {
+ binary.BigEndian.PutUint64(b[8*i:], uint64(id))
+ }
+ hash := sha1.Sum(b)
+ c.cid = types.ID(binary.BigEndian.Uint64(hash[:8]))
+}
+
+// UpdateRaftAttributes updates the raft attributes of the member with the given ID.
+func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.members[id].RaftAttributes = raftAttr
+ if c.v2store != nil {
+ mustUpdateMemberInStore(c.lg, c.v2store, c.members[id])
+ }
+ if c.be != nil && shouldApplyV3 {
+ unsafeSaveMemberToBackend(c.lg, c.be, c.members[id])
+ }
+
+ c.lg.Info("更新成员属性", zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
+ )
+}
+
+// MemberByName returns the member with the given name.
+func (c *RaftCluster) MemberByName(name string) *Member {
+ c.Lock()
+ defer c.Unlock()
+ var memb *Member
+ for _, m := range c.members {
+ if m.Name == name {
+ if memb != nil {
+ c.lg.Panic("发现了两个相同名称的成员", zap.String("name", name))
+ }
+ memb = m
+ }
+ }
+ return memb.Clone()
+}
+
+// MemberIDs returns the IDs of all members.
+func (c *RaftCluster) MemberIDs() []types.ID {
+ c.Lock()
+ defer c.Unlock()
+ var ids []types.ID
+ for _, m := range c.members {
+ ids = append(ids, m.ID)
+ }
+ sort.Sort(types.IDSlice(ids))
+ return ids
+}
+
+// SetID sets the local member ID and the cluster ID.
+func (c *RaftCluster) SetID(localID, cid types.ID) {
+ c.localID = localID
+ c.cid = cid
+}
+
+// SetStore OK
+func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st }
+
+func (c *RaftCluster) SetBackend(be backend.Backend) {
+ c.be = be
+ mustCreateBackendBuckets(c.be)
+}
+
+// VotingMembers returns the voting (non-learner) members of the cluster.
+func (c *RaftCluster) VotingMembers() []*Member {
+ c.Lock()
+ defer c.Unlock()
+ var ms MembersByID
+ for _, m := range c.members {
+ if !m.IsLearner {
+ ms = append(ms, m.Clone())
+ }
+ }
+ sort.Sort(ms)
+ return []*Member(ms)
+}
+
+// Version returns the cluster version.
+func (c *RaftCluster) Version() *semver.Version {
+ c.Lock()
+ defer c.Unlock()
+ if c.version == nil {
+ return nil
+ }
+ return semver.Must(semver.NewVersion(c.version.String()))
+}
+
+// SetVersion sets the cluster version.
+func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version), shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+ if c.version != nil {
+ c.lg.Info("更新集群版本",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("from", version.Cluster(c.version.String())),
+ zap.String("to", version.Cluster(ver.String())),
+ )
+ } else {
+ c.lg.Info("设置初始集群版本",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("cluster-version", version.Cluster(ver.String())),
+ )
+ }
+ c.version = ver
+ mustDetectDowngrade(c.lg, c.version, c.downgradeInfo)
+ if c.v2store != nil {
+ mustSaveClusterVersionToStore(c.lg, c.v2store, ver)
+ }
+ if c.be != nil && shouldApplyV3 {
+ mustSaveClusterVersionToBackend(c.be, ver)
+ }
+ onSet(c.lg, ver)
+}
+
+// NewClusterFromURLsMap creates a new raft cluster from the provided URLs map. Currently it does not support creating a cluster with raft learner members.
+func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) {
+ c := NewCluster(lg) // RaftCluster struct
+ for name, urls := range urlsmap {
+ m := NewMember(name, urls, token, nil)
+ if _, ok := c.members[m.ID]; ok {
+ return nil, fmt.Errorf(" %v", m)
+ }
+ if uint64(m.ID) == raft.None {
+ return nil, fmt.Errorf("不能使用 %x作为成员ID", raft.None)
+ }
+ c.members[m.ID] = m
+ }
+ c.genID() // generate the cluster ID
+ return c, nil
+}
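A rough usage sketch of this constructor, written as a fragment that would live inside this package; the initial-cluster string and token below are invented for illustration:

```go
// Illustrative fragment only, not part of the patch.
func exampleBootstrap(lg *zap.Logger) (*RaftCluster, error) {
	// types.NewURLsMap parses an --initial-cluster style string.
	urlsmap, err := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
	if err != nil {
		return nil, err
	}
	// The token takes part in member-ID hashing, so two clusters with the
	// same peer URLs but different tokens still get distinct member IDs.
	return NewClusterFromURLsMap(lg, "etcd-cluster-1", urlsmap)
}
```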
+
+// PeerURLs returns the peer URLs of all members, sorted.
+func (c *RaftCluster) PeerURLs() []string {
+ c.Lock()
+ defer c.Unlock()
+ urls := make([]string, 0)
+ for _, p := range c.members {
+ urls = append(urls, p.PeerURLs...)
+ }
+ sort.Strings(urls)
+ return urls
+}
+
+func NewCluster(lg *zap.Logger) *RaftCluster {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &RaftCluster{
+ lg: lg,
+ members: make(map[types.ID]*Member),
+ removed: make(map[types.ID]bool),
+ downgradeInfo: &DowngradeInfo{Enabled: false},
+ }
+}
+
+func clusterVersionFromBackend(lg *zap.Logger, be backend.Backend) *semver.Version {
+ ckey := backendClusterVersionKey()
+ tx := be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ keys, vals := tx.UnsafeRange(buckets.Cluster, ckey, nil, 0) // read the clusterVersion key from the cluster bucket
+ if len(keys) == 0 {
+ return nil
+ }
+ if len(keys) != 1 {
+ lg.Panic("unexpected number of keys when getting cluster version from backend", zap.Int("number-of-key", len(keys)))
+ }
+ return semver.Must(semver.NewVersion(string(vals[0])))
+}
+
+func downgradeInfoFromBackend(lg *zap.Logger, be backend.Backend) *DowngradeInfo {
+ dkey := backendDowngradeKey()
+ tx := be.ReadTx()
+ tx.Lock()
+ defer tx.Unlock()
+ keys, vals := tx.UnsafeRange(buckets.Cluster, dkey, nil, 0) // read the downgrade key from the cluster bucket
+
+ if len(keys) == 0 {
+ return nil
+ }
+
+ if len(keys) != 1 {
+ lg.Panic(
+ "unexpected number of keys when getting cluster version from backend",
+ zap.Int("number-of-key", len(keys)),
+ )
+ }
+ var d DowngradeInfo
+ if err := json.Unmarshal([]byte(vals[0]), &d); err != nil {
+ lg.Panic("反序列化失败", zap.Error(err))
+ }
+ if d.Enabled {
+ if _, err := semver.NewVersion(d.TargetVersion); err != nil {
+ lg.Panic(
+ "降级目标版本的版本格式出乎意料",
+ zap.String("target-version", d.TargetVersion),
+ )
+ }
+ }
+ return &d
+}
+
+func (c *RaftCluster) IsMemberExist(id types.ID) bool {
+ c.Lock()
+ defer c.Unlock()
+ _, ok := c.members[id]
+ return ok
+}
+
+func (c *RaftCluster) VotingMemberIDs() []types.ID {
+ c.Lock()
+ defer c.Unlock()
+ var ids []types.ID
+ for _, m := range c.members {
+ if !m.IsLearner {
+ ids = append(ids, m.ID)
+ }
+ }
+ sort.Sort(types.IDSlice(ids))
+ return ids
+}
+
+func (c *RaftCluster) IsLocalMemberLearner() bool {
+ c.Lock()
+ defer c.Unlock()
+ localMember, ok := c.members[c.localID]
+ if !ok {
+ c.lg.Panic("无法查找到本地节点", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()))
+ }
+ return localMember.IsLearner
+}
+
+func (c *RaftCluster) DowngradeInfo() *DowngradeInfo {
+ c.Lock()
+ defer c.Unlock()
+ if c.downgradeInfo == nil {
+ return &DowngradeInfo{Enabled: false}
+ }
+ d := &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion}
+ return d
+}
+
+func (c *RaftCluster) SetDowngradeInfo(d *DowngradeInfo, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.be != nil && shouldApplyV3 {
+ mustSaveDowngradeToBackend(c.lg, c.be, d)
+ }
+
+ c.downgradeInfo = d
+
+ if d.Enabled {
+ c.lg.Info(
+ "The etcd is ready to downgrade",
+ zap.String("target-version", d.TargetVersion),
+ zap.String("etcd-version", version.Version),
+ )
+ }
+}
+
+// PushMembershipToStorage overwrites the stored membership information so that it fully reflects the in-memory state of the RaftCluster.
+func (c *RaftCluster) PushMembershipToStorage() {
+ if c.be != nil {
+ TrimMembershipFromBackend(c.lg, c.be)
+ for _, m := range c.members {
+ unsafeSaveMemberToBackend(c.lg, c.be, m)
+ }
+ }
+ if c.v2store != nil {
+ TrimMembershipFromV2Store(c.lg, c.v2store)
+ for _, m := range c.members {
+ mustSaveMemberToStore(c.lg, c.v2store, m)
+ }
+ }
+}
+
+func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version {
+ e, err := st.Get(path.Join(storePrefix, "version"), false, false)
+ if err != nil {
+ if isKeyNotFound(err) {
+ return nil
+ }
+ lg.Panic("从store获取集群版本信息失败", zap.String("path", path.Join(storePrefix, "version")), zap.Error(err))
+ }
+ return semver.Must(semver.NewVersion(*e.NodeExtern.Value))
+}
+
+// IsValidVersionChange checks whether a version change is valid in two cases:
+// 1. Downgrade: the cluster version is one minor version higher than the local version; the cluster version should change.
+// 2. Cluster start: when not all member versions are available, the cluster version is set to MinVersion (3.0); once all members are at a higher version and the cluster version is lower than the local version, the cluster version should change.
+func IsValidVersionChange(cv *semver.Version, lv *semver.Version) bool {
+ // cluster version (major.minor only)
+ cv = &semver.Version{Major: cv.Major, Minor: cv.Minor}
+ // local version (major.minor only)
+ lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
+
+ if isValidDowngrade(cv, lv) || (cv.Major == lv.Major && cv.LessThan(*lv)) {
+ return true
+ }
+ return false
+}
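A small sketch of the accepted and rejected transitions, written as a fragment that would live in this package; the version numbers are arbitrary examples:

```go
cv := semver.New("3.6.0")                 // cluster version
lv := semver.New("3.5.0")                 // local version
fmt.Println(IsValidVersionChange(cv, lv)) // true: downgrade by exactly one minor version

cv, lv = semver.New("3.0.0"), semver.New("3.5.0") // cluster still at MinVersion after start-up
fmt.Println(IsValidVersionChange(cv, lv))         // true: cluster version may catch up to the local version

cv, lv = semver.New("3.6.0"), semver.New("3.4.0") // local member lags two minor versions behind
fmt.Println(IsValidVersionChange(cv, lv))         // false
```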
+
+// ValidateConfigurationChange validates the proposed ConfChange and ensures it is still valid.
+func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChangeV1) error {
+ members, removed := membersFromStore(c.lg, c.v2store) // all members from the v2store
+ // members includes leader, followers, learners and candidates
+ id := types.ID(cc.NodeID)
+ if removed[id] { // the node must not be among the removed members
+ return ErrIDRemoved
+ }
+ _ = cc.Context // the serialized ConfigChangeContext / Member data
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
+ confChangeContext := new(ConfigChangeContext)
+ if err := json.Unmarshal([]byte(cc.Context), confChangeContext); err != nil {
+ c.lg.Panic("failed to unmarshal confChangeContext", zap.Error(err))
+ }
+ if confChangeContext.IsPromote { // promoting a learner to a voting member, so it must currently be a learner
+ if members[id] == nil {
+ return ErrIDNotFound
+ }
+ if !members[id].IsLearner {
+ return ErrMemberNotLearner
+ }
+ } else { // adding a new member
+ if members[id] != nil {
+ return ErrIDExists
+ }
+
+ urls := make(map[string]bool) // peer URLs of all current members
+ for _, m := range members {
+ for _, u := range m.PeerURLs {
+ urls[u] = true
+ }
+ }
+ // check whether any of the new peer URLs already exists
+ for _, u := range confChangeContext.Member.PeerURLs {
+ if urls[u] {
+ return ErrPeerURLexists
+ }
+ }
+
+ if confChangeContext.Member.IsLearner { // the newly added member is a learner
+ numLearners := 0
+ for _, m := range members {
+ if m.IsLearner {
+ numLearners++
+ }
+ }
+ if numLearners+1 > maxLearners {
+ return ErrTooManyLearners
+ }
+ }
+ }
+
+ case raftpb.ConfChangeRemoveNode:
+ if members[id] == nil {
+ return ErrIDNotFound
+ }
+
+ case raftpb.ConfChangeUpdateNode:
+ // the member must exist and the updated peer URLs must not already be in use
+ if members[id] == nil {
+ return ErrIDNotFound
+ }
+ urls := make(map[string]bool)
+ for _, m := range members {
+ if m.ID == id {
+ continue
+ }
+ for _, u := range m.PeerURLs {
+ urls[u] = true
+ }
+ }
+ m := new(Member)
+ if err := json.Unmarshal([]byte(cc.Context), m); err != nil {
+ c.lg.Panic("反序列化成员失败", zap.Error(err))
+ }
+ for _, u := range m.PeerURLs {
+ if urls[u] {
+ return ErrPeerURLexists
+ }
+ }
+
+ default:
+ c.lg.Panic("未知的 ConfChange type", zap.String("type", cc.Type.String()))
+ }
+ return nil
+}
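For orientation, a hypothetical fragment showing how a caller inside this package might validate a remove-member proposal; the member ID is invented and the raftpb field names (Type, NodeID) are assumed to match the usual layout:

```go
cc := raftpb.ConfChangeV1{
	Type:   raftpb.ConfChangeRemoveNode,
	NodeID: 0x8e9e05c52164694d, // example member ID
}
if err := c.ValidateConfigurationChange(cc); err != nil {
	// e.g. ErrIDRemoved if the member was already removed,
	// or ErrIDNotFound if it never existed.
	c.lg.Warn("rejecting conf change", zap.Error(err))
}
```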
+
+// ClientURLs returns the URLs on which all members are listening for client requests, sorted.
+func (c *RaftCluster) ClientURLs() []string {
+ c.Lock()
+ defer c.Unlock()
+ urls := make([]string, 0)
+ for _, p := range c.members {
+ urls = append(urls, p.ClientURLs...)
+ }
+ sort.Strings(urls)
+ return urls
+}
diff --git a/etcd/etcdserver/api/membership/over_confstate.go b/etcd/etcdserver/api/membership/over_confstate.go
new file mode 100644
index 00000000000..c2f89fbf3fc
--- /dev/null
+++ b/etcd/etcdserver/api/membership/over_confstate.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "encoding/json"
+ "log"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+ "go.uber.org/zap"
+)
+
+var confStateKey = []byte("confState")
+
+// MustUnsafeSaveConfStateToBackend persists the raft ConfState into bolt.db (meta bucket, confState key).
+func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.BatchTx, confState *raftpb.ConfState) {
+ confStateBytes, err := json.Marshal(confState)
+ if err != nil {
+ lg.Panic("cannot marshal raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err))
+ }
+
+ tx.UnsafePut(buckets.Meta, confStateKey, confStateBytes)
+}
+
+// UnsafeConfStateFromBackend reads the raft ConfState back from bolt.db (meta bucket, confState key).
+func UnsafeConfStateFromBackend(lg *zap.Logger, tx backend.ReadTx) *raftpb.ConfState {
+ keys, vals := tx.UnsafeRange(buckets.Meta, confStateKey, nil, 0)
+ if len(keys) == 0 {
+ return nil
+ }
+
+ if len(keys) != 1 {
+ lg.Panic("不期待的key: "+string(confStateKey)+" 当从bolt获取集群版本", zap.Int("number-of-key", len(keys)))
+ }
+ var confState raftpb.ConfState
+ if err := json.Unmarshal(vals[0], &confState); err != nil {
+ log.Panic("从bolt.db获取到的值无法反序列化",
+ zap.ByteString("conf-state-json", []byte(vals[0])),
+ zap.Error(err))
+ }
+ return &confState
+}
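Since the value stored under the meta bucket is plain JSON of raftpb.ConfState, a standalone round-trip looks roughly like this (the printed field names depend on the generated struct tags):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ls-2018/etcd_cn/raft/raftpb"
)

func main() {
	cs := raftpb.ConfState{Voters: []uint64{1, 2, 3}, Learners: []uint64{4}}
	b, _ := json.Marshal(&cs)
	fmt.Println(string(b)) // e.g. {"voters":[1,2,3],"learners":[4]}

	var back raftpb.ConfState
	_ = json.Unmarshal(b, &back)
	fmt.Println(back.Voters, back.Learners) // [1 2 3] [4]
}
```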
diff --git a/etcd/etcdserver/api/membership/over_downgrade.go b/etcd/etcdserver/api/membership/over_downgrade.go
new file mode 100644
index 00000000000..6ac61c5295e
--- /dev/null
+++ b/etcd/etcdserver/api/membership/over_downgrade.go
@@ -0,0 +1,62 @@
+// Copyright 2020 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "github.com/coreos/go-semver/semver"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "go.uber.org/zap"
+)
+
+type DowngradeInfo struct {
+ TargetVersion string `json:"target-version"` // the downgrade target version; an empty string if the cluster is not downgrading
+ Enabled bool `json:"enabled"` // whether downgrade is enabled for the cluster
+}
+
+func (d *DowngradeInfo) GetTargetVersion() *semver.Version {
+ return semver.Must(semver.NewVersion(d.TargetVersion))
+}
+
+// mustDetectDowngrade detects an invalid version downgrade.
+func mustDetectDowngrade(lg *zap.Logger, cv *semver.Version, d *DowngradeInfo) {
+ lv := semver.Must(semver.NewVersion(version.Version))
+ lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
+
+ // If downgrade is enabled for the cluster, check the local version against the downgrade target version.
+ if d != nil && d.Enabled && d.TargetVersion != "" {
+ if lv.Equal(*d.GetTargetVersion()) {
+ if cv != nil {
+ lg.Info("cluster is downgrading to the target version", zap.String("target-cluster-version", d.TargetVersion), zap.String("determined-cluster-version", version.Cluster(cv.String())), zap.String("current-etcd-version", version.Version))
+ }
+ return
+ }
+ lg.Fatal("invalid downgrade; this etcd version is not allowed to join while downgrade is enabled", zap.String("current-etcd-version", version.Version), zap.String("target-cluster-version", d.TargetVersion))
+ }
+
+ // If downgrade is not enabled, check the local version against the determined cluster version; validation passes if the local version is not lower than the cluster version.
+ if cv != nil && lv.LessThan(*cv) {
+ lg.Fatal("invalid downgrade; the etcd version is lower than the determined cluster version", zap.String("current-etcd-version", version.Version), zap.String("determined-cluster-version", version.Cluster(cv.String())))
+ }
+}
+
+// AllowedDowngradeVersion returns the version the given version is allowed to downgrade to (one minor version lower).
+func AllowedDowngradeVersion(ver *semver.Version) *semver.Version {
+ return &semver.Version{Major: ver.Major, Minor: ver.Minor - 1}
+}
+
+// isValidDowngrade reports whether the cluster can downgrade from verFrom to verTo; the minor versions must differ by exactly 1.
+func isValidDowngrade(verFrom *semver.Version, verTo *semver.Version) bool {
+ return verTo.Equal(*AllowedDowngradeVersion(verFrom))
+}
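The downgrade rule above is pure minor-version arithmetic; a minimal standalone sketch with made-up versions:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// Mirrors AllowedDowngradeVersion: a 3.5 cluster may only downgrade to 3.4.
	from := semver.Version{Major: 3, Minor: 5}
	allowed := semver.Version{Major: from.Major, Minor: from.Minor - 1}

	fmt.Println(allowed.String())                                  // 3.4.0
	fmt.Println(semver.Version{Major: 3, Minor: 4}.Equal(allowed)) // true: valid downgrade target
	fmt.Println(semver.Version{Major: 3, Minor: 3}.Equal(allowed)) // false: 3.5 -> 3.3 is rejected
}
```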
diff --git a/etcd/etcdserver/api/membership/over_node_change.go b/etcd/etcdserver/api/membership/over_node_change.go
new file mode 100644
index 00000000000..a9ded70b82d
--- /dev/null
+++ b/etcd/etcdserver/api/membership/over_node_change.go
@@ -0,0 +1,293 @@
+package membership
+
+import (
+ "crypto/sha1"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math/rand"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "go.uber.org/zap"
+)
+
+type Member struct {
+ ID types.ID `json:"id"` // this member's ID, derived from a hash
+ RaftAttributes // raft-related attributes of the etcd member
+ Attributes // all non-raft attributes of the etcd member
+}
+
+// RaftAttributes holds the raft-related attributes of an etcd member.
+type RaftAttributes struct {
+ PeerURLs []string `json:"peerURLs"` // the list of peer URLs of this member in the raft cluster
+ IsLearner bool `json:"isLearner,omitempty"` // whether this member is a raft learner
+}
+
+// Attributes represents all non-raft attributes of an etcd member.
+type Attributes struct {
+ Name string `json:"name,omitempty"` // the name set when the member was created; defaults to "default"
+ ClientURLs []string `json:"clientURLs,omitempty"` // the URLs on which this member serves client requests
+}
+
+// NewMember creates a member without an ID and generates one from the cluster name, the peer URLs and the time. It is used for bootstrapping or adding new members.
+func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
+ memberId := computeMemberId(peerURLs, clusterName, now)
+ return newMember(name, peerURLs, memberId, false)
+}
+
+// NewMemberAsLearner creates a member without an ID and generates one from the cluster name, the peer URLs and the time. It is used for bootstrapping new learner members.
+func NewMemberAsLearner(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
+ memberId := computeMemberId(peerURLs, clusterName, now)
+ return newMember(name, peerURLs, memberId, true)
+}
+
+// IsReadyToAddVotingMember reports whether the cluster can safely accept a new voting member.
+func (c *RaftCluster) IsReadyToAddVotingMember() bool {
+ nmembers := 1 // count the member that is being added
+ nstarted := 0
+
+ for _, member := range c.VotingMembers() {
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ if nstarted == 1 && nmembers == 2 {
+ // Adding a new node to a one-member cluster, e.g. for restoring cluster data.
+ c.lg.Debug("number of started members is 1; can accept add-member requests")
+ return true
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ c.lg.Warn("拒绝添加成员;启动的成员将少于法定人数", zap.Int("number-of-started-member", nstarted), zap.Int("quorum", nquorum), zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()))
+ return false
+ }
+
+ return true
+}
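The quorum arithmetic above counts the member being added towards the future cluster size but not towards the started members; a standalone sketch with illustrative numbers:

```go
package main

import "fmt"

func main() {
	started, voting := 2, 3         // 2 of 3 existing voting members are started
	nmembers := voting + 1          // plus the member being added
	nquorum := nmembers/2 + 1       // quorum of the future cluster
	fmt.Println(nquorum)            // 3
	fmt.Println(started >= nquorum) // false: the add request would be rejected
}
```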
+
+func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool {
+ nmembers := 0
+ nstarted := 0
+
+ for _, member := range c.VotingMembers() {
+ if uint64(member.ID) == id {
+ continue
+ }
+
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ c.lg.Warn(
+ "rejecting member remove; started member will be less than quorum",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ return false
+ }
+
+ return true
+}
+
+// IsReadyToPromoteMember reports whether the member can be promoted, i.e. whether the started members still reach quorum after the promotion.
+func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool {
+ nmembers := 1 // count the promoted learner towards the future quorum
+ nstarted := 1
+
+ for _, member := range c.VotingMembers() {
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ c.lg.Warn("拒绝成员晋升;启动成员将少于法定人数",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ return false
+ }
+
+ return true
+}
+
+// ------------------------------------------------ over ------------------------------------------------
+
+// PromoteMember marks the member's IsLearner attribute as false.
+func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.members[id].RaftAttributes.IsLearner = false
+ if c.v2store != nil {
+ // the v2store is an in-memory tree of nodes
+ mustUpdateMemberInStore(c.lg, c.v2store, c.members[id])
+ }
+ if c.be != nil && shouldApplyV3 {
+ unsafeSaveMemberToBackend(c.lg, c.be, c.members[id])
+ }
+
+ c.lg.Info("成员角色提升", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()))
+}
+
+// AddMember adds a new member to the cluster and saves the given member's raftAttributes to storage. The given member should have empty attributes, and no member with the same ID may already exist.
+func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ var v2Err, beErr error
+ if c.v2store != nil {
+ v2Err = unsafeSaveMemberToStore(c.lg, c.v2store, m)
+ if v2Err != nil {
+ if e, ok := v2Err.(*v2error.Error); !ok || e.ErrorCode != v2error.EcodeNodeExist {
+ c.lg.Panic("保存member到v2store失败", zap.String("member-id", m.ID.String()), zap.Error(v2Err))
+ }
+ }
+ }
+ _ = backend.MyBackend{}
+ if c.be != nil && shouldApplyV3 {
+ beErr = unsafeSaveMemberToBackend(c.lg, c.be, m) // save to the bolt.db members bucket
+ if beErr != nil && !errors.Is(beErr, errMemberAlreadyExist) {
+ c.lg.Panic("failed to save member to backend", zap.String("member-id", m.ID.String()), zap.Error(beErr))
+ }
+ }
+ if v2Err != nil && (beErr != nil || c.be == nil) {
+ c.lg.Panic("保存member到store失败", zap.String("member-id", m.ID.String()), zap.Error(v2Err))
+ }
+
+ c.members[m.ID] = m
+
+ c.lg.Info("添加成员", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("added-peer-id", m.ID.String()), zap.Strings("added-peer-peer-urls", m.PeerURLs))
+}
+
+// RemoveMember removes the member with the given ID; the ID must exist in the store, otherwise it panics.
+func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+ var v2Err, beErr error
+ if c.v2store != nil {
+ v2Err = unsafeDeleteMemberFromStore(c.v2store, id)
+ if v2Err != nil {
+ if e, ok := v2Err.(*v2error.Error); !ok || e.ErrorCode != v2error.EcodeKeyNotFound {
+ c.lg.Panic("从v2store删除节点失败", zap.String("member-id", id.String()), zap.Error(v2Err))
+ }
+ }
+ }
+ if c.be != nil && shouldApplyV3 {
+ beErr = unsafeDeleteMemberFromBackend(c.be, id)
+ if beErr != nil && !errors.Is(beErr, errMemberNotFound) {
+ c.lg.Panic("从backend bolt 删除节点失败", zap.String("member-id", id.String()), zap.Error(beErr))
+ }
+ }
+ if v2Err != nil && (beErr != nil || c.be == nil) {
+ c.lg.Panic("从store中删除节点失败", zap.String("member-id", id.String()), zap.Error(v2Err))
+ }
+
+ m, ok := c.members[id]
+ delete(c.members, id)
+ c.removed[id] = true
+
+ if ok {
+ c.lg.Info("移除成员", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("removed-remote-peer-id", id.String()), zap.Strings("removed-remote-peer-urls", m.PeerURLs))
+ } else {
+ c.lg.Warn("该成员已经移除", zap.String("cluster-id", c.cid.String()), zap.String("local-member-id", c.localID.String()), zap.String("removed-remote-peer-id", id.String()))
+ }
+}
+
+// computeMemberId derives a member ID from the sorted peer URLs, the cluster name and (optionally) the current time.
+func computeMemberId(peerURLs types.URLs, clusterName string, now *time.Time) types.ID {
+ peerURLstrs := peerURLs.StringSlice()
+ sort.Strings(peerURLstrs)
+ joinedPeerUrls := strings.Join(peerURLstrs, "")
+ b := []byte(joinedPeerUrls)
+
+ b = append(b, []byte(clusterName)...)
+ if now != nil {
+ b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...)
+ }
+
+ hash := sha1.Sum(b)
+ return types.ID(binary.BigEndian.Uint64(hash[:8]))
+}
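A standalone sketch of the same derivation with invented inputs: the member ID is the first 8 bytes of SHA-1 over the sorted peer URLs plus the cluster name (no timestamp, as when members come from --initial-cluster):

```go
package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
	"sort"
	"strings"
)

func main() {
	peerURLs := []string{"http://10.0.0.1:2380"}
	sort.Strings(peerURLs)
	b := []byte(strings.Join(peerURLs, ""))
	b = append(b, []byte("etcd-cluster-1")...)

	hash := sha1.Sum(b)
	fmt.Printf("member ID: %x\n", binary.BigEndian.Uint64(hash[:8]))
}
```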
+
+func newMember(name string, peerURLs types.URLs, memberId types.ID, isLearner bool) *Member {
+ m := &Member{
+ RaftAttributes: RaftAttributes{
+ PeerURLs: peerURLs.StringSlice(),
+ IsLearner: isLearner,
+ },
+ Attributes: Attributes{Name: name},
+ ID: memberId,
+ }
+ return m
+}
+
+// PickPeerURL randomly picks one of the member's PeerURLs.
+func (m *Member) PickPeerURL() string {
+ if len(m.PeerURLs) == 0 {
+ panic("member should always have some peer urls")
+ }
+ return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
+}
+
+// Clone returns a deep copy of the member.
+func (m *Member) Clone() *Member {
+ if m == nil {
+ return nil
+ }
+ mm := &Member{
+ ID: m.ID,
+ RaftAttributes: RaftAttributes{
+ IsLearner: m.IsLearner,
+ },
+ Attributes: Attributes{
+ Name: m.Name,
+ },
+ }
+ if m.PeerURLs != nil {
+ mm.PeerURLs = make([]string, len(m.PeerURLs))
+ copy(mm.PeerURLs, m.PeerURLs)
+ }
+ if m.ClientURLs != nil {
+ mm.ClientURLs = make([]string, len(m.ClientURLs))
+ copy(mm.ClientURLs, m.ClientURLs)
+ }
+ return mm
+}
+
+func (m *Member) IsStarted() bool {
+ return len(m.Name) != 0
+}
+
+type MembersByID []*Member
+
+func (ms MembersByID) Len() int { return len(ms) }
+func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID }
+func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
+
+type MembersByPeerURLs []*Member
+
+func (ms MembersByPeerURLs) Len() int { return len(ms) }
+func (ms MembersByPeerURLs) Less(i, j int) bool {
+ return ms[i].PeerURLs[0] < ms[j].PeerURLs[0]
+}
+func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
diff --git a/etcd/etcdserver/api/membership/over_store.go b/etcd/etcdserver/api/membership/over_store.go
new file mode 100644
index 00000000000..8ef22e866ad
--- /dev/null
+++ b/etcd/etcdserver/api/membership/over_store.go
@@ -0,0 +1,370 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "path"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+const (
+ attributesSuffix = "attributes"
+ raftAttributesSuffix = "raftAttributes"
+ storePrefix = "/0" // 在store中存储成员信息的前缀
+
+)
+
+var (
+ StoreMembersPrefix = path.Join(storePrefix, "members") // /0/members
+ storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members") // /0/removed_members
+ errMemberAlreadyExist = fmt.Errorf("member already exists")
+ errMemberNotFound = fmt.Errorf("member not found")
+)
+
+// mustUpdateMemberAttrInStore updates a member's attributes in the v2store.
+func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) {
+ b, err := json.Marshal(m.Attributes)
+ if err != nil {
+ lg.Panic("failed to marshal attributes", zap.Error(err))
+ }
+ p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
+ if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic("failed to update attributes", zap.String("path", p), zap.Error(err))
+ }
+}
+
+// mustSaveClusterVersionToStore saves the cluster version into the v2store.
+func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) {
+ if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic(
+ "failed to save cluster version to store",
+ zap.String("path", StoreClusterVersionKey()),
+ zap.Error(err),
+ )
+ }
+}
+
+// mustCreateBackendBuckets creates the membership-related buckets in bolt.db.
+func mustCreateBackendBuckets(be backend.Backend) {
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(buckets.Members)
+ tx.UnsafeCreateBucket(buckets.MembersRemoved)
+ tx.UnsafeCreateBucket(buckets.Cluster)
+}
+
+// MemberAttributesStorePath returns the v2store path of a member's attributes.
+func MemberAttributesStorePath(id types.ID) string {
+ return path.Join(MemberStoreKey(id), attributesSuffix)
+}
+
+func mustParseMemberIDFromBytes(lg *zap.Logger, key []byte) types.ID {
+ id, err := types.IDFromString(string(key))
+ if err != nil {
+ lg.Panic("从key解析成员ID失败", zap.Error(err))
+ }
+ return id
+}
+
+// mustSaveMemberToStore saves a member to the v2store and panics on failure.
+func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) {
+ err := unsafeSaveMemberToStore(lg, s, m)
+ if err != nil {
+ lg.Panic(
+ "failed to save member to store",
+ zap.String("member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ }
+}
+
+// unsafeSaveMemberToStore saves the member node into the in-memory v2store.
+func unsafeSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) error {
+ b, err := json.Marshal(m.RaftAttributes) // the member's peer URLs and whether it is a raft learner
+ if err != nil {
+ lg.Panic("failed to marshal raftAttributes", zap.Error(err))
+ }
+ _ = computeMemberId // the ID is generated by computeMemberId from the peerURLs, the cluster name and the creation time (usually nil)
+ p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) // /0/members/123/raftAttributes
+ _, err = s.Create(p, false, string(b),
+ false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent})
+ return err
+}
+
+func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) {
+ // s is the in-memory tree of v2store nodes
+ b, err := json.Marshal(m.RaftAttributes) // the member's peer URLs and whether it is a raft learner
+ if err != nil {
+ lg.Panic("failed to marshal raftAttributes", zap.Error(err))
+ }
+ p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) // /0/members/<id>/raftAttributes
+ if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic("failed to update raftAttributes", zap.String("path", p), zap.Error(err))
+ }
+}
+
+// MustParseMemberIDFromKey parses a member ID from the last element of a store key, e.g. /0/members/8e9e05c52164694d.
+func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID {
+ id, err := types.IDFromString(path.Base(key)) // /0/members/8e9e05c52164694d
+ if err != nil {
+ lg.Panic("failed to parse member ID from key", zap.Error(err))
+ }
+ return id
+}
+
+// unsafeSaveMemberToBackend saves the member into the bolt.db backend.
+func unsafeSaveMemberToBackend(lg *zap.Logger, be backend.Backend, m *Member) error {
+ mkey := backendMemberKey(m.ID) // the ID as a hex string
+ mvalue, err := json.Marshal(m)
+ if err != nil {
+ lg.Panic("failed to marshal member", zap.Error(err))
+ }
+
+ tx := be.BatchTx() // write transaction
+ tx.Lock()
+ defer tx.Unlock()
+ if unsafeMemberExists(tx, mkey) {
+ return errMemberAlreadyExist
+ }
+ tx.UnsafePut(buckets.Members, mkey, mvalue)
+ return nil
+}
+
+// MemberStoreKey maps a member ID to its v2store key, e.g. 15 -----> /0/members/f
+func MemberStoreKey(id types.ID) string {
+ return path.Join(StoreMembersPrefix, id.String()) // /0/members/e
+}
+
+// RemovedMemberStoreKey maps a member ID to its removed-members key, e.g. 15 -----> /0/removed_members/f
+func RemovedMemberStoreKey(id types.ID) string {
+ return path.Join(storeRemovedMembersPrefix, id.String())
+}
+
+// unsafeDeleteMemberFromStore removes the member from the store and records it under removed_members.
+func unsafeDeleteMemberFromStore(s v2store.Store, id types.ID) error {
+ if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
+ return err
+ }
+ if _, err := s.Create(RemovedMemberStoreKey(id),
+ false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ return err
+ }
+ return nil
+}
+
+// unsafeMemberExists iterates over all k/v pairs in the bolt.db members bucket and reports whether mkey is present.
+func unsafeMemberExists(tx backend.ReadTx, mkey []byte) bool {
+ var found bool
+ tx.UnsafeForEach(buckets.Members, func(k, v []byte) error {
+ if bytes.Equal(k, mkey) {
+ found = true
+ }
+ return nil
+ })
+ return found
+}
+
+// unsafeDeleteMemberFromBackend deletes the member from bolt.db and records it in the members_removed bucket.
+func unsafeDeleteMemberFromBackend(be backend.Backend, id types.ID) error {
+ mkey := backendMemberKey(id)
+
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafePut(buckets.MembersRemoved, mkey, []byte("removed")) // record the removal
+ if !unsafeMemberExists(tx, mkey) {
+ return errMemberNotFound
+ }
+ tx.UnsafeDelete(buckets.Members, mkey)
+ return nil
+}
+
+// backendMemberKey returns the bolt.db key of a member (its ID as a hex string).
+func backendMemberKey(id types.ID) []byte {
+ return []byte(id.String())
+}
+
+// nodeToMember builds a Member from a v2store node.
+func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) {
+ m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)}
+ attrs := make(map[string][]byte)
+ raftAttrKey := path.Join(n.Key, raftAttributesSuffix) // /0/members/8e9e05c52164694d/raftAttributes
+ attrKey := path.Join(n.Key, attributesSuffix) // /0/members/8e9e05c52164694d/attributes
+ // e.g. &v2store.NodeExtern{Key: "/0/members/8e9e05c52164694d", ExternNodes: []*v2store.NodeExtern{
+ // {Key: "/0/members/8e9e05c52164694d/attributes", Value: stringp(`{"name":"node1","clientURLs":null}`)},
+ // {Key: "/0/members/8e9e05c52164694d/raftAttributes", Value: stringp(`{"peerURLs":null}`)},
+ // }}
+ for _, nn := range n.ExternNodes {
+ if nn.Key != raftAttrKey && nn.Key != attrKey {
+ return nil, fmt.Errorf("未知的 key %q", nn.Key)
+ }
+ attrs[nn.Key] = []byte(*nn.Value)
+ }
+ if data := attrs[raftAttrKey]; data != nil {
+ if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
+ return nil, fmt.Errorf("failed to unmarshal raftAttributes: %v", err)
+ }
+ } else {
+ return nil, fmt.Errorf("raftAttributes key doesn't exist")
+ }
+ if data := attrs[attrKey]; data != nil {
+ if err := json.Unmarshal(data, &m.Attributes); err != nil {
+ return m, fmt.Errorf("failed to unmarshal attributes: %v", err)
+ }
+ }
+ return m, nil
+}
+
+// TrimClusterFromBackend removes the cluster bucket from bolt.db.
+func TrimClusterFromBackend(be backend.Backend) error {
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafeDeleteBucket(buckets.Cluster)
+ return nil
+}
+
+// readMembersFromBackend reads the members and members_removed buckets from bolt.db.
+func readMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool, error) {
+ members := make(map[types.ID]*Member)
+ removed := make(map[types.ID]bool)
+
+ tx := be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error {
+ memberId := mustParseMemberIDFromBytes(lg, k)
+ m := &Member{ID: memberId}
+ if err := json.Unmarshal(v, &m); err != nil {
+ return err
+ }
+ members[memberId] = m
+ return nil
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("不能读取bolt.db中的member桶: %w", err)
+ }
+
+ err = tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error {
+ memberId := mustParseMemberIDFromBytes(lg, k)
+ removed[memberId] = true
+ return nil
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("不能读取bolt.db中的 members_removed 桶: %w", err)
+ }
+ return members, removed, nil
+}
+
+func membersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) {
+ return mustReadMembersFromBackend(lg, be)
+}
+
+// mustReadMembersFromBackend reads membership information from bolt.db and panics on failure.
+func mustReadMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) {
+ members, removed, err := readMembersFromBackend(lg, be)
+ if err != nil {
+ lg.Panic("couldn't read members from bolt.db", zap.Error(err))
+ }
+ return members, removed
+}
+
+// TrimMembershipFromBackend removes all membership information from bolt.db.
+func TrimMembershipFromBackend(lg *zap.Logger, be backend.Backend) error {
+ lg.Info("removing membership information from the backend...")
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error {
+ tx.UnsafeDelete(buckets.Members, k)
+ lg.Debug("删除成员信息", zap.Stringer("member", mustParseMemberIDFromBytes(lg, k)))
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ return tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error {
+ tx.UnsafeDelete(buckets.MembersRemoved, k)
+ lg.Debug("删除 已移除的成员信息", zap.Stringer("member", mustParseMemberIDFromBytes(lg, k)))
+ return nil
+ })
+}
+
+// TrimMembershipFromV2Store removes all membership information from the v2store.
+func TrimMembershipFromV2Store(lg *zap.Logger, s v2store.Store) error {
+ members, removed := membersFromStore(lg, s)
+
+ for mID := range members {
+ _, err := s.Delete(MemberStoreKey(mID), true, true)
+ if err != nil {
+ return err
+ }
+ }
+ for mID := range removed {
+ _, err := s.Delete(RemovedMemberStoreKey(mID), true, true)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// mustSaveClusterVersionToBackend saves the cluster version into bolt.db.
+func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) {
+ ckey := backendClusterVersionKey()
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafePut(buckets.Cluster, ckey, []byte(ver.String()))
+}
+
+// backendClusterVersionKey returns the bolt.db key of the cluster version.
+func backendClusterVersionKey() []byte {
+ return []byte("clusterVersion")
+}
+
+func backendDowngradeKey() []byte {
+ return []byte("downgrade")
+}
+
+// mustSaveDowngradeToBackend saves the downgrade information into bolt.db.
+func mustSaveDowngradeToBackend(lg *zap.Logger, be backend.Backend, downgrade *DowngradeInfo) {
+ dkey := backendDowngradeKey() // downgrade
+ dvalue, err := json.Marshal(downgrade)
+ if err != nil {
+ lg.Panic("failed to marshal downgrade information", zap.Error(err))
+ }
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafePut(buckets.Cluster, dkey, dvalue)
+}
+
+// StoreClusterVersionKey returns the v2store path of the cluster version.
+func StoreClusterVersionKey() string { // /0/version
+ return path.Join(storePrefix, "version")
+}
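To summarize where this file puts membership data, here is a small standalone sketch printing the v2store paths used above (the member ID is an example); the bolt.db side is noted in the trailing comment:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	id := "8e9e05c52164694d"
	fmt.Println(path.Join("/0", "members", id, "raftAttributes")) // v2store: peer URLs / learner flag
	fmt.Println(path.Join("/0", "members", id, "attributes"))     // v2store: name / client URLs
	fmt.Println(path.Join("/0", "removed_members", id))           // v2store: removal marker
	fmt.Println(path.Join("/0", "version"))                       // v2store: cluster version
	// bolt.db: buckets "members" and "members_removed" keyed by the hex member ID,
	// plus bucket "cluster" with keys "clusterVersion" and "downgrade".
}
```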
diff --git a/etcd/etcdserver/api/membership/over_storev2.go b/etcd/etcdserver/api/membership/over_storev2.go
new file mode 100644
index 00000000000..2e92ba7956b
--- /dev/null
+++ b/etcd/etcdserver/api/membership/over_storev2.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+)
+
+// IsMetaStoreOnly verifies whether the given `store` contains only meta information (members, version) that can be recovered from the backend (storev3), and no user data.
+func IsMetaStoreOnly(store v2store.Store) (bool, error) {
+ event, err := store.Get("/", true, false)
+ if err != nil {
+ return false, err
+ }
+ for _, n := range event.NodeExtern.ExternNodes {
+ if n.Key != storePrefix && n.ExternNodes.Len() > 0 {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
diff --git a/etcd/etcdserver/api/over_cluster.go b/etcd/etcdserver/api/over_cluster.go
new file mode 100644
index 00000000000..491fa8e41a5
--- /dev/null
+++ b/etcd/etcdserver/api/over_cluster.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+type Cluster interface {
+ ID() types.ID // the cluster ID
+ ClientURLs() []string // all URLs on which this cluster is listening for client requests
+ Members() []*membership.Member // the cluster members, sorted
+ Member(id types.ID) *membership.Member
+ Version() *semver.Version
+}
diff --git a/server/etcdserver/api/rafthttp/msg_codec.go b/etcd/etcdserver/api/rafthttp/msg_codec.go
similarity index 86%
rename from server/etcdserver/api/rafthttp/msg_codec.go
rename to etcd/etcdserver/api/rafthttp/msg_codec.go
index 5444c01f8fd..98b5ea3ad5b 100644
--- a/server/etcdserver/api/rafthttp/msg_codec.go
+++ b/etcd/etcdserver/api/rafthttp/msg_codec.go
@@ -19,12 +19,10 @@ import (
"errors"
"io"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/raft/v3/raftpb"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
)
-// messageEncoder is a encoder that can encode all kinds of messages.
-// It MUST be used with a paired messageDecoder.
type messageEncoder struct {
w io.Writer
}
@@ -37,7 +35,6 @@ func (enc *messageEncoder) encode(m *raftpb.Message) error {
return err
}
-// messageDecoder is a decoder that can decode all kinds of messages.
type messageDecoder struct {
r io.Reader
}
diff --git a/server/etcdserver/api/rafthttp/msgappv2_codec.go b/etcd/etcdserver/api/rafthttp/msgappv2_codec.go
similarity index 80%
rename from server/etcdserver/api/rafthttp/msgappv2_codec.go
rename to etcd/etcdserver/api/rafthttp/msgappv2_codec.go
index 59425aeea69..b6c57878a41 100644
--- a/server/etcdserver/api/rafthttp/msgappv2_codec.go
+++ b/etcd/etcdserver/api/rafthttp/msgappv2_codec.go
@@ -15,52 +15,26 @@
package rafthttp
import (
+ "bytes"
"encoding/binary"
"fmt"
"io"
+ "io/ioutil"
"time"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
- "go.etcd.io/raft/v3/raftpb"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
)
const (
msgTypeLinkHeartbeat uint8 = 0
msgTypeAppEntries uint8 = 1
msgTypeApp uint8 = 2
-
- msgAppV2BufSize = 1024 * 1024
+ msgAppV2BufSize = 1024 * 1024
)
-// msgappv2 stream sends three types of message: linkHeartbeatMessage,
-// AppEntries and MsgApp. AppEntries is the MsgApp that is sent in
-// replicate state in raft, whose index and term are fully predictable.
-//
-// Data format of linkHeartbeatMessage:
-// | offset | bytes | description |
-// +--------+-------+-------------+
-// | 0 | 1 | \x00 |
-//
-// Data format of AppEntries:
-// | offset | bytes | description |
-// +--------+-------+-------------+
-// | 0 | 1 | \x01 |
-// | 1 | 8 | length of entries |
-// | 9 | 8 | length of first entry |
-// | 17 | n1 | first entry |
-// ...
-// | x | 8 | length of k-th entry data |
-// | x+8 | nk | k-th entry data |
-// | x+8+nk | 8 | commit index |
-//
-// Data format of MsgApp:
-// | offset | bytes | description |
-// +--------+-------+-------------+
-// | 0 | 1 | \x02 |
-// | 1 | 8 | length of encoded message |
-// | 9 | n | encoded message |
type msgAppV2Encoder struct {
w io.Writer
fs *stats.FollowerStats
@@ -101,13 +75,14 @@ func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error {
return err
}
for i := 0; i < len(m.Entries); i++ {
- // write length of entry
binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size()))
if _, err := enc.w.Write(enc.uint64buf); err != nil {
return err
}
if n := m.Entries[i].Size(); n < msgAppV2BufSize {
- if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil {
+ temp, err := m.Entries[i].Marshal()
+ enc.buf = append(enc.buf[:0], temp...) // reset the reusable buffer before copying the marshaled entry in
+ if err != nil {
return err
}
if _, err := enc.w.Write(enc.buf[:n]); err != nil {
@@ -176,6 +151,8 @@ func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
m raftpb.Message
typ uint8
)
+ xxx, _ := ioutil.ReadAll(dec.r)
+ dec.r = bytes.NewReader(xxx)
if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil {
return m, err
}
diff --git a/etcd/etcdserver/api/rafthttp/over_coder.go b/etcd/etcdserver/api/rafthttp/over_coder.go
new file mode 100644
index 00000000000..3c3b517486c
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/over_coder.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+type encoder interface {
+ encode(m *raftpb.Message) error
+}
+
+type decoder interface {
+ decode() (raftpb.Message, error)
+}
diff --git a/etcd/etcdserver/api/rafthttp/over_http.go b/etcd/etcdserver/api/rafthttp/over_http.go
new file mode 100644
index 00000000000..ef2a2527d5e
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/over_http.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "errors"
+ "net/http"
+ "path"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "go.uber.org/zap"
+)
+
+const (
+ connReadLimitByte = 64 * 1024 // the maximum number of bytes read from a single request body
+ snapshotLimitByte = 1 * 1024 * 1024 * 1024 * 1024 // the upper bound on snapshot size (1 TB)
+)
+
+var (
+ RaftPrefix = "/raft"
+ ProbingPrefix = path.Join(RaftPrefix, "probing")
+ RaftStreamPrefix = path.Join(RaftPrefix, "stream")
+ RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")
+ errIncompatibleVersion = errors.New("incompatible version")
+ errClusterIDMismatch = errors.New("cluster ID mismatch")
+)
+
+type peerGetter interface {
+ Get(id types.ID) Peer
+}
+
+type writerToResponse interface {
+ WriteTo(w http.ResponseWriter)
+}
+
+// checkClusterCompatibilityFromHeader checks cluster version compatibility: it verifies that the local member's
+// version is compatible with the versions in the header, and that the local member's cluster ID matches the ID in the header.
+func checkClusterCompatibilityFromHeader(lg *zap.Logger, localID types.ID, header http.Header, cid types.ID) error {
+ remoteName := header.Get("X-Server-From")
+ remoteServer := serverVersion(header)
+ remoteVs := ""
+ if remoteServer != nil {
+ remoteVs = remoteServer.String()
+ }
+
+ remoteMinClusterVer := minClusterVersion(header)
+ remoteMinClusterVs := ""
+ if remoteMinClusterVer != nil {
+ remoteMinClusterVs = remoteMinClusterVer.String()
+ }
+
+ localServer, localMinCluster, err := checkVersionCompatibility(remoteName, remoteServer, remoteMinClusterVer)
+
+ localVs := ""
+ if localServer != nil {
+ localVs = localServer.String()
+ }
+ localMinClusterVs := ""
+ if localMinCluster != nil {
+ localMinClusterVs = localMinCluster.String()
+ }
+
+ if err != nil {
+ lg.Warn(
+ "检查版本兼容性失败",
+ zap.String("local-member-id", localID.String()),
+ zap.String("local-member-cluster-id", cid.String()),
+ zap.String("local-member-etcd-version", localVs),
+ zap.String("local-member-etcd-minimum-cluster-version", localMinClusterVs),
+ zap.String("remote-peer-etcd-name", remoteName),
+ zap.String("remote-peer-etcd-version", remoteVs),
+ zap.String("remote-peer-etcd-minimum-cluster-version", remoteMinClusterVs),
+ zap.Error(err),
+ )
+ return errIncompatibleVersion
+ }
+ if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() {
+ lg.Warn(
+ "集群ID不匹配",
+ zap.String("local-member-id", localID.String()),
+ zap.String("local-member-cluster-id", cid.String()),
+ zap.String("local-member-etcd-version", localVs),
+ zap.String("local-member-etcd-minimum-cluster-version", localMinClusterVs),
+ zap.String("remote-peer-etcd-name", remoteName),
+ zap.String("remote-peer-etcd-version", remoteVs),
+ zap.String("remote-peer-etcd-minimum-cluster-version", remoteMinClusterVs),
+ zap.String("remote-peer-cluster-id", gcid),
+ )
+ return errClusterIDMismatch
+ }
+ return nil
+}
+
+type closeNotifier struct {
+ done chan struct{}
+}
+
+func newCloseNotifier() *closeNotifier {
+ return &closeNotifier{
+ done: make(chan struct{}),
+ }
+}
+
+func (n *closeNotifier) Close() error {
+ close(n.done)
+ return nil
+}
+
+func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }
diff --git a/etcd/etcdserver/api/rafthttp/over_raft_pipeline_api.go b/etcd/etcdserver/api/rafthttp/over_raft_pipeline_api.go
new file mode 100644
index 00000000000..fcd92554caf
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/over_raft_pipeline_api.go
@@ -0,0 +1,85 @@
+package rafthttp
+
+import (
+ "context"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ pioutil "github.com/ls-2018/etcd_cn/pkg/ioutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+ "go.uber.org/zap"
+)
+
+// newPipelineHandler The Pipeline channel handles large messages such as snapshots. These messages must be processed
+// separately from heartbeats and similar traffic, otherwise they would block heartbeat delivery and hurt cluster stability.
+// Pipeline communication does not keep long-lived HTTP connections between peers; data is sent over short-lived connections that are closed when done.
+func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler {
+ h := &pipelineHandler{
+ lg: t.Logger,
+ localID: t.ID,
+ tr: t,
+ r: r,
+ cid: cid,
+ }
+ if h.lg == nil {
+ h.lg = zap.NewNop()
+ }
+ return h
+}
+
+type pipelineHandler struct {
+ lg *zap.Logger
+ localID types.ID
+ tr Transporter
+ r Raft
+ cid types.ID
+}
+
+func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ w.Header().Set("Allow", "POST")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ return
+ }
+
+ addRemoteFromRequest(h.tr, r)
+
+ limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte) // limit the request body to 64 KB
+ b, err := ioutil.ReadAll(limitedr)
+ if err != nil {
+ h.lg.Warn("failed to read raft message", zap.String("local-member-id", h.localID.String()), zap.Error(err))
+ http.Error(w, "failed to read raft message", http.StatusBadRequest)
+ return
+ }
+
+ var m raftpb.Message
+ if err := m.Unmarshal(b); err != nil {
+ h.lg.Warn("failed to unmarshal raft message", zap.String("local-member-id", h.localID.String()), zap.Error(err))
+ http.Error(w, "failed to unmarshal raft message", http.StatusBadRequest)
+ return
+ }
+
+ if err := h.r.Process(context.TODO(), m); err != nil {
+ switch v := err.(type) {
+ case writerToResponse:
+ v.WriteTo(w)
+ default:
+ h.lg.Warn("处理raft消息错误", zap.String("local-member-id", h.localID.String()), zap.Error(err))
+ http.Error(w, "处理raft消息错误", http.StatusInternalServerError)
+ w.(http.Flusher).Flush()
+ // 断开http流的连接
+ panic(err)
+ }
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
diff --git a/etcd/etcdserver/api/rafthttp/over_raft_snapshot_api.go b/etcd/etcdserver/api/rafthttp/over_raft_snapshot_api.go
new file mode 100644
index 00000000000..f03823120a4
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/over_raft_snapshot_api.go
@@ -0,0 +1,136 @@
+package rafthttp
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+ "go.uber.org/zap"
+)
+
+type snapshotHandler struct {
+ lg *zap.Logger
+ tr Transporter
+ r Raft
+ snapshotter *snap.Snapshotter
+
+ localID types.ID
+ cid types.ID
+}
+
+func newSnapshotHandler(t *Transport, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {
+ h := &snapshotHandler{
+ lg: t.Logger,
+ tr: t,
+ r: r,
+ snapshotter: snapshotter,
+ localID: t.ID,
+ cid: cid,
+ }
+ if h.lg == nil {
+ h.lg = zap.NewNop()
+ }
+ return h
+}
+
+// ServeHTTP serves HTTP request to receive and process snapshot message.
+// 1. If the request sender dies without closing the underlying TCP connection, the handler keeps waiting for the request body until TCP keepalive detects the broken connection a few minutes later. This is acceptable because snapshot messages sent over other TCP connections can still be received and processed.
+// 2. This case should happen rarely, so no further optimization is done.
+func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+
+ if r.Method != "POST" {
+ w.Header().Set("Allow", "POST")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ return
+ }
+
+ addRemoteFromRequest(h.tr, r)
+
+ dec := &messageDecoder{r: r.Body}
+ // The snapshot may be larger than 512 MB.
+ m, err := dec.decodeLimit(snapshotLimitByte) // 8-byte message length, then the message, then the snapshot data
+ from := types.ID(m.From).String()
+ if err != nil {
+ msg := fmt.Sprintf("解码raft消息失败 (%v)", err)
+ h.lg.Warn("解码raft消息失败", zap.String("local-member-id", h.localID.String()), zap.String("remote-snapshot-sender-id", from), zap.Error(err))
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+
+ msgSize := m.Size()
+
+ if m.Type != raftpb.MsgSnap {
+ h.lg.Warn(
+ "unexpected message type",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.String("message-type", m.Type.String()),
+ )
+ http.Error(w, "unexpected message type", http.StatusBadRequest)
+ return
+ }
+
+ h.lg.Info(
+ "开始接受快照",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Int("incoming-snapshot-message-size-bytes", msgSize),
+ zap.String("incoming-snapshot-message-size", humanize.Bytes(uint64(msgSize))),
+ )
+
+ n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
+ if err != nil {
+ msg := fmt.Sprintf("保存快照失败 (%v)", err)
+ h.lg.Warn(
+ "保存快照失败",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Error(err),
+ )
+ http.Error(w, msg, http.StatusInternalServerError)
+ return
+ }
+
+ downloadTook := time.Since(start)
+ h.lg.Info(
+ "接受并保存数据库快照",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Int64("incoming-snapshot-size-bytes", n),
+ zap.String("incoming-snapshot-size", humanize.Bytes(uint64(n))),
+ zap.String("download-took", downloadTook.String()),
+ )
+
+ if err := h.r.Process(context.TODO(), m); err != nil {
+ switch v := err.(type) {
+ case writerToResponse:
+ v.WriteTo(w)
+ default:
+ msg := fmt.Sprintf("处理消息失败 (%v)", err)
+ h.lg.Warn("处理消息失败", zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Error(err),
+ )
+ http.Error(w, msg, http.StatusInternalServerError)
+ }
+ return
+ }
+ w.WriteHeader(http.StatusNoContent)
+}
diff --git a/etcd/etcdserver/api/rafthttp/peer.go b/etcd/etcdserver/api/rafthttp/peer.go
new file mode 100644
index 00000000000..d5d696b5983
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/peer.go
@@ -0,0 +1,370 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+)
+
+const (
+ // ConnReadTimeout and ConnWriteTimeout are the i/o timeout set on each connection rafthttp pkg creates.
+ // A 5 seconds timeout is good enough for recycling bad connections. Or we have to wait for
+ // tcp keepalive failing to detect a bad connection, which is at minutes level.
+ // For long term streaming connections, rafthttp pkg sends application level linkHeartbeatMessage
+ // to keep the connection alive.
+ // For short term pipeline connections, the connection must be killed to avoid it being
+ // put back to http pkg connection pool.
+ DefaultConnReadTimeout = 5 * time.Second
+ DefaultConnWriteTimeout = 5 * time.Second
+
+ recvBufSize = 4096
+ // maxPendingProposals holds the proposals during one leader election process.
+ // Generally one leader election takes at most 1 sec. It should have
+ // 0-2 election conflicts, and each one takes 0.5 sec.
+ // We assume the number of concurrent proposers is smaller than 4096.
+ // One client blocks on its proposal for at least 1 sec, so 4096 is enough
+ // to hold all proposals.
+ maxPendingProposals = 4096
+
+ streamAppV2 = "streamMsgAppV2"
+ streamMsg = "streamMsg"
+ pipelineMsg = "pipeline"
+ sendSnap = "sendMsgSnap"
+)
+
+var (
+ ConnReadTimeout = DefaultConnReadTimeout // the read timeout set on each rafthttp connection (5s)
+ ConnWriteTimeout = DefaultConnWriteTimeout
+)
+
+type Peer interface {
+ // send sends the message to the remote peer. The function is non-blocking
+ // and has no promise that the message will be received by the remote.
+ // When it fails to send message out, it will report the status to underlying
+ // raft.
+ send(m raftpb.Message)
+
+ // sendSnap sends the merged snapshot message to the remote peer. Its behavior
+ // is similar to send.
+ sendSnap(m snap.Message)
+
+ // update updates the urls of remote peer.
+ update(urls types.URLs)
+
+ // attachOutgoingConn attaches the outgoing connection to the peer for
+ // stream usage. After the call, the ownership of the outgoing
+ // connection hands over to the peer. The peer will close the connection
+ // when it is no longer used.
+ attachOutgoingConn(conn *outgoingConn)
+ // activeSince returns the time that the connection with the
+ // peer becomes active.
+ activeSince() time.Time
+ // stop performs any necessary finalization and terminates the peer
+ // elegantly.
+ stop()
+}
+
+// peer is the representative of a remote raft node. Local raft node sends
+// messages to the remote through peer.
+// Each peer has two underlying mechanisms to send out a message: stream and
+// pipeline.
+// A stream is a receiver initialized long-polling connection, which
+// is always open to transfer messages. Besides general stream, peer also has
+// a optimized stream for sending msgApp since msgApp accounts for large part
+// of all messages. Only raft leader uses the optimized stream to send msgApp
+// to the remote follower node.
+// A pipeline is a series of http clients that send http requests to the remote.
+// It is only used when the stream has not been established.
+type peer struct {
+ lg *zap.Logger
+
+ localID types.ID
+ // id of the remote raft peer node
+ id types.ID
+
+ r Raft
+
+ status *peerStatus
+
+ picker *urlPicker
+
+ msgAppV2Writer *streamWriter
+ writer *streamWriter
+ pipeline *pipeline
+ snapSender *snapshotSender // snapshot sender to send v3 snapshot messages
+ msgAppV2Reader *streamReader
+ msgAppReader *streamReader
+
+ recvc chan raftpb.Message
+ propc chan raftpb.Message
+
+ mu sync.Mutex
+ paused bool
+
+ cancel context.CancelFunc // cancel pending works in go routine created by peer.
+ stopc chan struct{}
+}
+
+func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
+ if t.Logger != nil {
+ t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String()))
+ }
+ defer func() {
+ if t.Logger != nil {
+ t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String()))
+ }
+ }()
+
+ status := newPeerStatus(t.Logger, t.ID, peerID)
+ picker := newURLPicker(urls)
+ errorc := t.ErrorC
+ r := t.Raft
+ pipeline := &pipeline{
+ peerID: peerID,
+ tr: t,
+ picker: picker,
+ status: status,
+ followerStats: fs,
+ raft: r,
+ errorc: errorc,
+ }
+ pipeline.start()
+
+ p := &peer{
+ lg: t.Logger,
+ localID: t.ID,
+ id: peerID,
+ r: r,
+ status: status,
+ picker: picker,
+ msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
+ writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
+ pipeline: pipeline,
+ snapSender: newSnapshotSender(t, picker, peerID, status),
+ recvc: make(chan raftpb.Message, recvBufSize),
+ propc: make(chan raftpb.Message, maxPendingProposals),
+ stopc: make(chan struct{}),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ p.cancel = cancel
+ go func() {
+ for {
+ select {
+ case mm := <-p.recvc:
+ if err := r.Process(ctx, mm); err != nil {
+ if t.Logger != nil {
+ t.Logger.Warn("failed to process Raft message", zap.Error(err))
+ }
+ }
+ case <-p.stopc:
+ return
+ }
+ }
+ }()
+
+ // r.Process might block for processing proposal when there is no leader.
+ // Thus propc must be put into a separate routine with recvc to avoid blocking
+ // processing other raft messages.
+ go func() {
+ for {
+ select {
+ case mm := <-p.propc:
+ if err := r.Process(ctx, mm); err != nil {
+ if t.Logger != nil {
+ t.Logger.Warn("failed to process Raft message", zap.Error(err))
+ }
+ }
+ case <-p.stopc:
+ return
+ }
+ }
+ }()
+
+ p.msgAppV2Reader = &streamReader{
+ lg: t.Logger,
+ peerID: peerID,
+ typ: streamTypeMsgAppV2,
+ tr: t,
+ picker: picker,
+ status: status,
+ recvc: p.recvc,
+ propc: p.propc,
+ rl: rate.NewLimiter(t.DialRetryFrequency, 1),
+ }
+ p.msgAppReader = &streamReader{
+ lg: t.Logger,
+ peerID: peerID,
+ typ: streamTypeMessage,
+ tr: t,
+ picker: picker,
+ status: status,
+ recvc: p.recvc,
+ propc: p.propc,
+ rl: rate.NewLimiter(t.DialRetryFrequency, 1),
+ }
+
+ p.msgAppV2Reader.start()
+ p.msgAppReader.start()
+
+ return p
+}
+
+func (p *peer) send(m raftpb.Message) {
+ p.mu.Lock()
+ paused := p.paused
+ p.mu.Unlock()
+
+ if paused {
+ return
+ }
+ // pick returns the pipeline channel for snapshot messages, the msgAppV2Writer
+ // channel for MsgApp messages, and the default writer channel otherwise.
+ // writec itself is created in streamWriter.run (stream.go).
+ writec, name := p.pick(m)
+ select {
+ /* Write the message into the channel; the receiving end is the msgc channel
+ * in streamWriter.run (stream.go).
+ */
+ case writec <- m:
+ default:
+ p.r.ReportUnreachable(m.To)
+ if isMsgSnap(m) {
+ p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
+ }
+ if p.status.isActive() {
+ if p.lg != nil {
+ p.lg.Warn(
+ "dropped internal Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", p.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.String("remote-peer-name", name),
+ zap.Bool("remote-peer-active", p.status.isActive()),
+ )
+ }
+ } else {
+ if p.lg != nil {
+ p.lg.Warn(
+ "dropped internal Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", p.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.String("remote-peer-name", name),
+ zap.Bool("remote-peer-active", p.status.isActive()),
+ )
+ }
+ }
+ }
+}
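+
+// The select/default above is the non-blocking buffered-send pattern: when the
+// buffer is full the message is dropped and the peer is reported unreachable
+// instead of blocking the raft loop. A minimal, self-contained sketch
+// (illustrative only; trySend and onDrop are hypothetical names, not part of
+// this package):
+//
+//	func trySend(writec chan<- raftpb.Message, m raftpb.Message, onDrop func()) bool {
+//		select {
+//		case writec <- m: // buffered channel owned by the stream writer or pipeline
+//			return true
+//		default: // buffer full: drop the message and report it
+//			onDrop()
+//			return false
+//		}
+//	}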
+
+func (p *peer) sendSnap(m snap.Message) {
+ go p.snapSender.send(m)
+}
+
+func (p *peer) update(urls types.URLs) {
+ p.picker.update(urls)
+}
+
+func (p *peer) attachOutgoingConn(conn *outgoingConn) {
+ var ok bool
+ switch conn.t {
+ case streamTypeMsgAppV2:
+ ok = p.msgAppV2Writer.attach(conn)
+ case streamTypeMessage:
+ ok = p.writer.attach(conn)
+ default:
+ if p.lg != nil {
+ p.lg.Panic("未知的stream类型", zap.String("type", conn.t.String()))
+ }
+ }
+ if !ok {
+ conn.Close()
+ }
+}
+
+func (p *peer) activeSince() time.Time { return p.status.activeSince() }
+
+// Pause pauses the peer. The peer will simply drop all incoming
+// messages without returning an error.
+func (p *peer) Pause() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.paused = true
+ p.msgAppReader.pause()
+ p.msgAppV2Reader.pause()
+}
+
+// Resume resumes a paused peer.
+func (p *peer) Resume() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.paused = false
+ p.msgAppReader.resume()
+ p.msgAppV2Reader.resume()
+}
+
+func (p *peer) stop() {
+ if p.lg != nil {
+ p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String()))
+ }
+
+ defer func() {
+ if p.lg != nil {
+ p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String()))
+ }
+ }()
+
+ close(p.stopc)
+ p.cancel()
+ p.msgAppV2Writer.stop()
+ p.writer.stop()
+ p.pipeline.stop()
+ p.snapSender.stop()
+ p.msgAppV2Reader.stop()
+ p.msgAppReader.stop()
+}
+
+// pick selects the appropriate outgoing channel based on the message type.
+func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
+ var ok bool
+ // Considering MsgSnap may have a big size, e.g., 1G, and will block
+ // stream for a long time, only use one of the N pipelines to send MsgSnap.
+ if isMsgSnap(m) {
+ return p.pipeline.msgc, pipelineMsg
+ } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
+ return writec, streamAppV2
+ } else if writec, ok = p.writer.writec(); ok {
+ return writec, streamMsg
+ }
+ return p.pipeline.msgc, pipelineMsg
+}
+
+func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp }
+
+func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }
diff --git a/server/etcdserver/api/rafthttp/peer_status.go b/etcd/etcdserver/api/rafthttp/peer_status.go
similarity index 87%
rename from server/etcdserver/api/rafthttp/peer_status.go
rename to etcd/etcdserver/api/rafthttp/peer_status.go
index cad19b2fbce..108f87eb877 100644
--- a/server/etcdserver/api/rafthttp/peer_status.go
+++ b/etcd/etcdserver/api/rafthttp/peer_status.go
@@ -20,7 +20,7 @@ import (
"sync"
"time"
- "go.etcd.io/etcd/client/pkg/v3/types"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
"go.uber.org/zap"
)
@@ -30,7 +30,7 @@ type failureType struct {
action string
}
-type peerStatus struct {
+type peerStatus struct { // status of the remote peer node
lg *zap.Logger
local types.ID
id types.ID
@@ -54,7 +54,6 @@ func (s *peerStatus) activate() {
s.active = true
s.since = time.Now()
- activePeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
}
}
@@ -67,8 +66,6 @@ func (s *peerStatus) deactivate(failure failureType, reason string) {
s.active = false
s.since = time.Time{}
- activePeers.WithLabelValues(s.local.String(), s.id.String()).Dec()
- disconnectedPeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
return
}
diff --git a/server/etcdserver/api/rafthttp/pipeline.go b/etcd/etcdserver/api/rafthttp/pipeline.go
similarity index 87%
rename from server/etcdserver/api/rafthttp/pipeline.go
rename to etcd/etcdserver/api/rafthttp/pipeline.go
index b8ff3dfcadb..040887e5f46 100644
--- a/server/etcdserver/api/rafthttp/pipeline.go
+++ b/etcd/etcdserver/api/rafthttp/pipeline.go
@@ -18,16 +18,17 @@ import (
"bytes"
"context"
"errors"
- "io"
+ "io/ioutil"
"runtime"
"sync"
"time"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
- "go.etcd.io/raft/v3"
- "go.etcd.io/raft/v3/raftpb"
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
"go.uber.org/zap"
)
@@ -62,15 +63,15 @@ type pipeline struct {
func (p *pipeline) start() {
p.stopc = make(chan struct{})
- p.msgc = make(chan raftpb.Message, pipelineBufSize)
- p.wg.Add(connPerPipeline)
+ p.msgc = make(chan raftpb.Message, pipelineBufSize) // 64
+ p.wg.Add(connPerPipeline) // 4
for i := 0; i < connPerPipeline; i++ {
go p.handle()
}
if p.tr != nil && p.tr.Logger != nil {
p.tr.Logger.Info(
- "started HTTP pipelining with remote peer",
+ "与远程对等端启动HTTP管道",
zap.String("local-member-id", p.tr.ID.String()),
zap.String("remote-peer-id", p.peerID.String()),
)
@@ -83,7 +84,7 @@ func (p *pipeline) stop() {
if p.tr != nil && p.tr.Logger != nil {
p.tr.Logger.Info(
- "stopped HTTP pipelining with remote peer",
+ "停止与远程对等端HTTP管道",
zap.String("local-member-id", p.tr.ID.String()),
zap.String("remote-peer-id", p.peerID.String()),
)
@@ -110,7 +111,6 @@ func (p *pipeline) handle() {
if isMsgSnap(m) {
p.raft.ReportSnapshot(m.To, raft.SnapshotFailure)
}
- sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
continue
}
@@ -121,7 +121,6 @@ func (p *pipeline) handle() {
if isMsgSnap(m) {
p.raft.ReportSnapshot(m.To, raft.SnapshotFinish)
}
- sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size()))
case <-p.stopc:
return
}
@@ -154,7 +153,7 @@ func (p *pipeline) post(data []byte) (err error) {
return err
}
defer resp.Body.Close()
- b, err := io.ReadAll(resp.Body)
+ b, err := ioutil.ReadAll(resp.Body)
if err != nil {
p.picker.unreachable(u)
return err
diff --git a/server/etcdserver/api/rafthttp/probing_status.go b/etcd/etcdserver/api/rafthttp/probing_status.go
similarity index 78%
rename from server/etcdserver/api/rafthttp/probing_status.go
rename to etcd/etcdserver/api/rafthttp/probing_status.go
index 672a579ce62..a35d5c0aa3b 100644
--- a/server/etcdserver/api/rafthttp/probing_status.go
+++ b/etcd/etcdserver/api/rafthttp/probing_status.go
@@ -31,32 +31,13 @@ const (
)
var (
- // proberInterval must be shorter than read timeout.
+ // proberInterval must be shorter than the read timeout.
// Or the connection will time-out.
proberInterval = ConnReadTimeout - time.Second
statusMonitoringInterval = 30 * time.Second
statusErrorInterval = 5 * time.Second
)
-func addPeerToProber(lg *zap.Logger, p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
- hus := make([]string, len(us))
- for i := range us {
- hus[i] = us[i] + ProbingPrefix
- }
-
- p.AddHTTP(id, proberInterval, hus)
-
- s, err := p.Status(id)
- if err != nil {
- if lg != nil {
- lg.Warn("failed to add peer into prober", zap.String("remote-peer-id", id), zap.Error(err))
- }
- return
- }
-
- go monitorProbingStatus(lg, s, id, roundTripperName, rttSecProm)
-}
-
func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
// set the first interval short to log error early.
interval := statusErrorInterval
@@ -89,7 +70,6 @@ func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string, roundTrip
)
}
}
- rttSecProm.WithLabelValues(id).Observe(s.SRTT().Seconds())
case <-s.StopNotify():
return
diff --git a/etcd/etcdserver/api/rafthttp/raft_stream_api.go b/etcd/etcdserver/api/rafthttp/raft_stream_api.go
new file mode 100644
index 00000000000..dca157f474e
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/raft_stream_api.go
@@ -0,0 +1,140 @@
+package rafthttp
+
+import (
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+
+ "go.uber.org/zap"
+)
+
+type streamHandler struct {
+ lg *zap.Logger
+ tr *Transport
+ peerGetter peerGetter
+ r Raft
+ id types.ID
+ cid types.ID
+}
+
+func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler {
+ h := &streamHandler{
+ lg: t.Logger,
+ tr: t,
+ peerGetter: pg,
+ r: r,
+ id: id,
+ cid: cid,
+ }
+ if h.lg == nil {
+ h.lg = zap.NewNop()
+ }
+ return h
+}
+
+// ServeHTTP handles a stream request from a remote peer and attaches the connection as an outgoing stream.
+func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" {
+ w.Header().Set("Allow", "GET")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Server-Version", version.Version)
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.tr.ID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed) // precondition failed
+ return
+ }
+
+ var t streamType
+ switch path.Dir(r.URL.Path) {
+ case streamTypeMsgAppV2.endpoint(h.lg): // /raft/stream/msgappv2
+ t = streamTypeMsgAppV2
+ case streamTypeMessage.endpoint(h.lg): // /raft/stream/message
+ t = streamTypeMessage
+ default:
+ h.lg.Debug("忽略意外的流请求路径",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("path", r.URL.Path),
+ )
+ http.Error(w, "无效的路径", http.StatusNotFound)
+ return
+ }
+
+ fromStr := path.Base(r.URL.Path)
+ from, err := types.IDFromString(fromStr)
+ if err != nil {
+ h.lg.Warn(
+ "无法将路径解析为ID",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("path", fromStr),
+ zap.Error(err),
+ )
+ http.Error(w, "invalid from", http.StatusNotFound)
+ return
+ }
+ if h.r.IsIDRemoved(uint64(from)) {
+ h.lg.Warn(
+ "拒绝流,该节点已被移除",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-from", from.String()),
+ )
+ http.Error(w, "该节点已被移除", http.StatusGone)
+ return
+ }
+ p := h.peerGetter.Get(from)
+ if p == nil {
+ // This may happen in the following cases:
+ // 1. The user started a remote peer that belongs to a different cluster but has the same cluster ID.
+ // 2. The local etcd has fallen behind the cluster and cannot recognize members that joined after its current progress.
+ if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+ h.tr.AddRemote(from, strings.Split(urls, ","))
+ }
+ h.lg.Warn(
+ "在集群中没有找到远端节点",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-from", from.String()),
+ zap.String("cluster-id", h.cid.String()),
+ )
+ http.Error(w, "发送方没有发现该节点", http.StatusNotFound)
+ return
+ }
+
+ wto := h.id.String()
+ if gto := r.Header.Get("X-Raft-To"); gto != wto {
+ h.lg.Warn(
+ "忽略流请求; ID 不匹配",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-header", gto),
+ zap.String("remote-peer-id-from", from.String()),
+ zap.String("cluster-id", h.cid.String()),
+ )
+ http.Error(w, "to field mismatch", http.StatusPreconditionFailed)
+ return
+ }
+ /* Note: this code does not push the response out by itself; the actual delivery relies on Flush in net/http. */
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+
+ c := newCloseNotifier()
+ conn := &outgoingConn{
+ t: t, // stream type of the connection
+ Writer: w,
+ Flusher: w.(http.Flusher),
+ Closer: c,
+ localID: h.tr.ID,
+ peerID: from,
+ }
+ p.attachOutgoingConn(conn) // hands the connection to streamWriter.run via its connc channel
+ <-c.closeNotify() // block here until the connection is closed
+}
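+
+// newCloseNotifier used above is defined elsewhere in this package. A minimal
+// equivalent (illustrative sketch only) pairs an io.Closer with a channel that
+// is closed by Close, which is what keeps ServeHTTP blocked until the attached
+// stream connection is torn down:
+//
+//	type closeNotifier struct{ done chan struct{} }
+//
+//	func newCloseNotifier() *closeNotifier { return &closeNotifier{done: make(chan struct{})} }
+//	func (n *closeNotifier) Close() error  { close(n.done); return nil }
+//	func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }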
diff --git a/server/etcdserver/api/rafthttp/remote.go b/etcd/etcdserver/api/rafthttp/remote.go
similarity index 91%
rename from server/etcdserver/api/rafthttp/remote.go
rename to etcd/etcdserver/api/rafthttp/remote.go
index f40acbb9802..9b9afada521 100644
--- a/server/etcdserver/api/rafthttp/remote.go
+++ b/etcd/etcdserver/api/rafthttp/remote.go
@@ -15,8 +15,8 @@
package rafthttp
import (
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/raft/v3/raftpb"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
"go.uber.org/zap"
)
@@ -30,8 +30,8 @@ type remote struct {
}
func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote {
- picker := newURLPicker(urls)
- status := newPeerStatus(tr.Logger, tr.ID, id)
+ picker := newURLPicker(urls) // ok
+ status := newPeerStatus(tr.Logger, tr.ID, id) // ok
pipeline := &pipeline{
peerID: id,
tr: tr,
@@ -78,7 +78,6 @@ func (g *remote) send(m raftpb.Message) {
)
}
}
- sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
}
}
diff --git a/server/etcdserver/api/rafthttp/snapshot_sender.go b/etcd/etcdserver/api/rafthttp/snapshot_sender.go
similarity index 86%
rename from server/etcdserver/api/rafthttp/snapshot_sender.go
rename to etcd/etcdserver/api/rafthttp/snapshot_sender.go
index 9b98474fe00..65d29504fc2 100644
--- a/server/etcdserver/api/rafthttp/snapshot_sender.go
+++ b/etcd/etcdserver/api/rafthttp/snapshot_sender.go
@@ -18,23 +18,23 @@ import (
"bytes"
"context"
"io"
+ "io/ioutil"
"net/http"
"time"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/httputil"
- pioutil "go.etcd.io/etcd/pkg/v3/ioutil"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/raft/v3"
-
"github.com/dustin/go-humanize"
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/pkg/httputil"
+ pioutil "github.com/ls-2018/etcd_cn/pkg/ioutil"
+
"go.uber.org/zap"
)
-var (
- // timeout for reading snapshot response body
- snapResponseReadTimeout = 5 * time.Second
-)
+// timeout for reading snapshot response body
+var snapResponseReadTimeout = 5 * time.Second
type snapshotSender struct {
from, to types.ID
@@ -66,8 +66,6 @@ func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *pe
func (s *snapshotSender) stop() { close(s.stopc) }
func (s *snapshotSender) send(merged snap.Message) {
- start := time.Now()
-
m := merged.Message
to := types.ID(m.To).String()
@@ -89,11 +87,6 @@ func (s *snapshotSender) send(merged snap.Message) {
)
}
- snapshotSendInflights.WithLabelValues(to).Inc()
- defer func() {
- snapshotSendInflights.WithLabelValues(to).Dec()
- }()
-
err := s.post(req)
defer merged.CloseWithError(err)
if err != nil {
@@ -121,8 +114,6 @@ func (s *snapshotSender) send(merged snap.Message) {
// machine knows about it, it would pause a while and retry sending
// new snapshot message.
s.r.ReportSnapshot(m.To, raft.SnapshotFailure)
- sentFailures.WithLabelValues(to).Inc()
- snapshotSendFailures.WithLabelValues(to).Inc()
return
}
s.status.activate()
@@ -137,10 +128,6 @@ func (s *snapshotSender) send(merged snap.Message) {
zap.String("size", snapshotSize),
)
}
-
- sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize))
- snapshotSend.WithLabelValues(to).Inc()
- snapshotSendSeconds.WithLabelValues(to).Observe(time.Since(start).Seconds())
}
// post posts the given request.
@@ -168,7 +155,7 @@ func (s *snapshotSender) post(req *http.Request) (err error) {
// prevents from reading the body forever when the other side dies right after
// successfully receives the request body.
time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) })
- body, err := io.ReadAll(resp.Body)
+ body, err := ioutil.ReadAll(resp.Body)
result <- responseAndError{resp, body, err}
}()
diff --git a/server/etcdserver/api/rafthttp/stream.go b/etcd/etcdserver/api/rafthttp/stream.go
similarity index 81%
rename from server/etcdserver/api/rafthttp/stream.go
rename to etcd/etcdserver/api/rafthttp/stream.go
index c8a1f1fb5ea..3dcdfd85ba4 100644
--- a/server/etcdserver/api/rafthttp/stream.go
+++ b/etcd/etcdserver/api/rafthttp/stream.go
@@ -18,18 +18,19 @@ import (
"context"
"fmt"
"io"
+ "io/ioutil"
"net/http"
"path"
"strings"
"sync"
"time"
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/client/pkg/v3/transport"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/httputil"
- stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
- "go.etcd.io/raft/v3/raftpb"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "github.com/ls-2018/etcd_cn/pkg/httputil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
@@ -58,7 +59,6 @@ var (
"3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
"3.4.0": {streamTypeMsgAppV2, streamTypeMessage},
"3.5.0": {streamTypeMsgAppV2, streamTypeMessage},
- "3.6.0": {streamTypeMsgAppV2, streamTypeMessage},
}
)
@@ -66,13 +66,13 @@ type streamType string
func (t streamType) endpoint(lg *zap.Logger) string {
switch t {
- case streamTypeMsgAppV2:
+ case streamTypeMsgAppV2: // /raft/stream/msgappv2
return path.Join(RaftStreamPrefix, "msgapp")
- case streamTypeMessage:
+ case streamTypeMessage: // /raft/stream/message
return path.Join(RaftStreamPrefix, "message")
default:
if lg != nil {
- lg.Panic("unhandled stream type", zap.String("stream-type", t.String()))
+ lg.Panic("无法处理的路由", zap.String("stream-type", t.String()))
}
return ""
}
@@ -89,19 +89,17 @@ func (t streamType) String() string {
}
}
-var (
- // linkHeartbeatMessage is a special message used as heartbeat message in
- // link layer. It never conflicts with messages from raft because raft
- // doesn't send out messages without From and To fields.
- linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat}
-)
+// linkHeartbeatMessage is a special message used as heartbeat message in
+// link layer. It never conflicts with messages from raft because raft
+// doesn't send out messages without From and To fields.
+var linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat}
func isLinkHeartbeatMessage(m *raftpb.Message) bool {
return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0
}
type outgoingConn struct {
- t streamType
+ t streamType // stream type of the connection
io.Writer
http.Flusher
io.Closer
@@ -154,12 +152,12 @@ func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, f
func (cw *streamWriter) run() {
var (
- msgc chan raftpb.Message
- heartbeatc <-chan time.Time
- t streamType
- enc encoder
- flusher http.Flusher
- batched int
+ msgc chan raftpb.Message // points at the msgc field of the current streamWriter
+ heartbeatc <-chan time.Time // a ticker signals this channel periodically to trigger a heartbeat; unlike the Raft heartbeat, it only keeps an idle connection from being closed
+ t streamType // records the stream type (message version)
+ enc encoder // encoder that serializes messages and writes them into the connection buffer
+ flusher http.Flusher // flushes the underlying connection so the data is actually sent
+ batched int // number of messages not yet flushed
)
tickc := time.NewTicker(ConnReadTimeout / 3)
defer tickc.Stop()
@@ -176,19 +174,18 @@ func (cw *streamWriter) run() {
for {
select {
case <-heartbeatc:
+ // not a raft heartbeat; it is sent only to keep the connection from timing out
err := enc.encode(&linkHeartbeatMessage)
unflushed += linkHeartbeatMessage.Size()
if err == nil {
flusher.Flush()
batched = 0
- sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
unflushed = 0
continue
}
cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error())
- sentFailures.WithLabelValues(cw.peerID.String()).Inc()
cw.close()
if cw.lg != nil {
cw.lg.Warn(
@@ -201,24 +198,23 @@ func (cw *streamWriter) run() {
heartbeatc, msgc = nil, nil
case m := <-msgc:
- err := enc.encode(&m)
+ err := enc.encode(&m) // serialize the message (e.g. an election message)
if err == nil {
unflushed += m.Size()
-
- if len(msgc) == 0 || batched > streamBufSize/2 {
- flusher.Flush()
- sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+ // Flush when the msgc channel has been drained or there are many unflushed messages; otherwise just increment batched.
+ if len(msgc) == 0 || batched > streamBufSize/2 { // streamBufSize is a package-level constant (4096)
+ flusher.Flush() // flush the buffer so the data is sent to the peer (net/http Flusher)
unflushed = 0
batched = 0
} else {
batched++
}
-
+ // return after the send completes; the streaming session itself is not terminated
continue
}
cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
- cw.close()
+ cw.close() // this exchange is over, i.e. the HTTP session is closed
if cw.lg != nil {
cw.lg.Warn(
"lost TCP streaming connection with remote peer",
@@ -229,9 +225,8 @@ func (cw *streamWriter) run() {
}
heartbeatc, msgc = nil, nil
cw.r.ReportUnreachable(m.To)
- sentFailures.WithLabelValues(cw.peerID.String()).Inc()
- case conn := <-cw.connc:
+ case conn := <-cw.connc: // receiving a conn from the channel means the session has been established
cw.mu.Lock()
closed := cw.closeUnlocked()
t = conn.t
@@ -253,17 +248,17 @@ func (cw *streamWriter) run() {
zap.String("stream-type", t.String()),
)
}
- flusher = conn.Flusher
+ flusher = conn.Flusher // record the Flusher of the underlying connection
unflushed = 0
- cw.status.activate()
- cw.closer = conn.Closer
- cw.working = true
+ cw.status.activate() // mark the peer status as active
+ cw.closer = conn.Closer // record the Closer of the underlying connection
+ cw.working = true // mark this streamWriter as running
cw.mu.Unlock()
if closed {
if cw.lg != nil {
cw.lg.Warn(
- "closed TCP streaming connection with remote peer",
+ "关闭与远端节点的TCP链接",
zap.String("stream-writer-type", t.String()),
zap.String("local-member-id", cw.localID.String()),
zap.String("remote-peer-id", cw.peerID.String()),
@@ -272,19 +267,19 @@ func (cw *streamWriter) run() {
}
if cw.lg != nil {
cw.lg.Info(
- "established TCP streaming connection with remote peer",
+ "与远端节点建立了TCP链接",
zap.String("stream-writer-type", t.String()),
zap.String("local-member-id", cw.localID.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
}
- heartbeatc, msgc = tickc.C, cw.msgc
+ heartbeatc, msgc = tickc.C, cw.msgc // enable the heartbeat and message channels
case <-cw.stopc:
if cw.close() {
if cw.lg != nil {
cw.lg.Warn(
- "closed TCP streaming connection with remote peer",
+ "关闭与远端节点的TCP链接",
zap.String("stream-writer-type", t.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
@@ -292,7 +287,7 @@ func (cw *streamWriter) run() {
}
if cw.lg != nil {
cw.lg.Info(
- "stopped TCP streaming connection with remote peer",
+ "停止与远端节点的TCP链接",
zap.String("stream-writer-type", t.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
@@ -389,11 +384,10 @@ func (cr *streamReader) start() {
}
func (cr *streamReader) run() {
- t := cr.typ
+ t := cr.typ // msgappv2
if cr.lg != nil {
- cr.lg.Info(
- "started stream reader with remote peer",
+ cr.lg.Info("开始与远程节点进行流式阅读",
zap.String("stream-reader-type", t.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
@@ -409,27 +403,16 @@ func (cr *streamReader) run() {
} else {
cr.status.activate()
if cr.lg != nil {
- cr.lg.Info(
- "established TCP streaming connection with remote peer",
- zap.String("stream-reader-type", cr.typ.String()),
- zap.String("local-member-id", cr.tr.ID.String()),
- zap.String("remote-peer-id", cr.peerID.String()),
- )
+ cr.lg.Info("已建立的TCP流媒体连接与远程节点", zap.String("stream-reader-type", cr.typ.String()), zap.String("local-member-id", cr.tr.ID.String()), zap.String("remote-peer-id", cr.peerID.String()))
}
err = cr.decodeLoop(rc, t)
if cr.lg != nil {
- cr.lg.Warn(
- "lost TCP streaming connection with remote peer",
- zap.String("stream-reader-type", cr.typ.String()),
- zap.String("local-member-id", cr.tr.ID.String()),
- zap.String("remote-peer-id", cr.peerID.String()),
- zap.Error(err),
- )
+ cr.lg.Warn("丢失TCP流媒体连接与远程节点", zap.String("stream-reader-type", cr.typ.String()), zap.String("local-member-id", cr.tr.ID.String()), zap.String("remote-peer-id", cr.peerID.String()), zap.Error(err))
}
switch {
- // all data is read out
+ // 读取了所有数据
case err == io.EOF:
- // connection is closed by the remote
+ // 远端节点关闭了链接
case transport.IsClosedConnError(err):
default:
cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error())
@@ -490,7 +473,7 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
// gofail: labelRaftDropHeartbeat:
for {
- m, err := dec.decode()
+ m, err := dec.decode() // block waiting for the next message
if err != nil {
cr.mu.Lock()
cr.close()
@@ -500,7 +483,6 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
// gofail-go: var raftDropHeartbeat struct{}
// continue labelRaftDropHeartbeat
- receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))
cr.mu.Lock()
paused := cr.paused
@@ -523,7 +505,7 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
}
select {
- case recvc <- m:
+ case recvc <- m: // write the message to the channel; the other end is read in startPeer (rafthttp/peer.go)
default:
if cr.status.isActive() {
if cr.lg != nil {
@@ -548,7 +530,6 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
)
}
}
- recvFailures.WithLabelValues(types.ID(m.From).String()).Inc()
}
}
}
@@ -628,7 +609,7 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)
case http.StatusPreconditionFailed:
- b, err := io.ReadAll(resp.Body)
+ b, err := ioutil.ReadAll(resp.Body)
if err != nil {
cr.picker.unreachable(u)
return nil, err
@@ -640,7 +621,7 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
case errIncompatibleVersion.Error():
if cr.lg != nil {
cr.lg.Warn(
- "request sent was ignored by remote peer due to server version incompatibility",
+ "request sent was ignored by remote peer due to etcd version incompatibility",
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
zap.Error(errIncompatibleVersion),
diff --git a/etcd/etcdserver/api/rafthttp/transport_api.go b/etcd/etcdserver/api/rafthttp/transport_api.go
new file mode 100644
index 00000000000..f8e2811f0a0
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/transport_api.go
@@ -0,0 +1,421 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+ "github.com/xiang90/probing"
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+)
+
+type Raft interface {
+ Process(ctx context.Context, m raftpb.Message) error
+ IsIDRemoved(id uint64) bool
+ ReportUnreachable(id uint64)
+ ReportSnapshot(id uint64, status raft.SnapshotStatus)
+}
+
+type Transporter interface {
+ // Start starts the given Transporter.
+ // Start must be called before calling other functions in the interface.
+ Start() error
+ Handler() http.Handler
+ // Send sends out the given messages to the remote peers.
+ // Each message has a To field, which is an id that maps
+ // to an existing peer in the transport.
+ // If the id cannot be found in the transport, the message
+ // will be ignored.
+ Send(m []raftpb.Message)
+ // SendSnapshot sends out the given snapshot message to a remote peer.
+ // The behavior of SendSnapshot is similar to Send.
+ SendSnapshot(m snap.Message)
+ AddRemote(id types.ID, urls []string)
+ AddPeer(id types.ID, urls []string) // connects to the remote peer
+ RemovePeer(id types.ID) // tears down the connection to the remote peer
+ // RemoveAllPeers removes all the existing peers in the transport.
+ RemoveAllPeers()
+ // UpdatePeer updates the peer urls of the peer with the given id.
+ // It is the caller's responsibility to ensure the urls are all valid,
+ // or it panics.
+ UpdatePeer(id types.ID, urls []string)
+ ActiveSince(id types.ID) time.Time // returns the time when the connection with the peer of the given id became active
+ // ActivePeers returns the number of active peers.
+ ActivePeers() int
+ // Stop closes the connections and stops the transporter.
+ Stop()
+}
+
+// Transport implements Transporter interface. It provides the functionality
+// to send raft messages to peers, and receive raft messages from peers.
+// User should call Handler method to get a handler to serve requests
+// received from peerURLs.
+// User needs to call Start before calling other functions, and call
+// Stop when the Transport is no longer used.
+type Transport struct {
+ Logger *zap.Logger
+ DialTimeout time.Duration // maximum duration before timing out dial of the request
+ // DialRetryFrequency defines the frequency of streamReader dial retrial attempts;
+ // a distinct rate limiter is created per every peer (default value: 10 events/sec)
+ DialRetryFrequency rate.Limit
+ TLSInfo transport.TLSInfo // TLS information used when creating connection
+ ID types.ID // ID of the local member
+ URLs types.URLs // local peer URLs
+ ClusterID types.ID // cluster identifier
+ Raft Raft // raft state machine; Transport forwards received messages to it and reports its status
+ Snapshotter *snap.Snapshotter
+ ServerStats *stats.ServerStats // used to record general transportation statistics
+ // used to record transportation statistics with followers when
+ // performing as leader in raft protocol
+ LeaderStats *stats.LeaderStats
+ // ErrorC is used to report detected critical errors, e.g.,
+ // the member has been permanently removed from the cluster
+ // When an error is received from ErrorC, user should stop raft state
+ // machine and thus stop the Transport.
+ ErrorC chan error
+ streamRt http.RoundTripper // http.RoundTripper used by the stream message channel
+ pipelineRt http.RoundTripper // http.RoundTripper used by the pipeline message channel
+ mu sync.RWMutex // protect the remote and peer map
+ peers map[types.ID]Peer
+ remotes map[types.ID]*remote // a remote only wraps a pipeline; it mainly sends snapshot data to help a newly joined node catch up with the other nodes
+ pipelineProber probing.Prober
+ streamProber probing.Prober
+}
+
+func (t *Transport) Start() error {
+ var err error
+ t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout)
+ if err != nil {
+ return err
+ }
+ t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout)
+ if err != nil {
+ return err
+ }
+ t.remotes = make(map[types.ID]*remote)
+ t.peers = make(map[types.ID]Peer)
+ t.pipelineProber = probing.NewProber(t.pipelineRt)
+ t.streamProber = probing.NewProber(t.streamRt)
+
+ // If client didn't provide dial retry frequency, use the default
+ // (100ms backoff between attempts to create a new stream),
+ // so it doesn't bring too much overhead when retry.
+ if t.DialRetryFrequency == 0 {
+ t.DialRetryFrequency = rate.Every(100 * time.Millisecond)
+ }
+ return nil
+}
+
+// Handler ✅
+func (t *Transport) Handler() http.Handler {
+ //_ = etcdserver.EtcdServer{} // loop import
+ pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID)
+ streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID)
+ snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID)
+ mux := http.NewServeMux()
+ mux.Handle(RaftPrefix, pipelineHandler) // /raft
+ mux.Handle(RaftStreamPrefix+"/", streamHandler) // /raft/stream/
+ mux.Handle(RaftSnapshotPrefix, snapHandler) // /raft/snapshot // ✅
+ mux.Handle(ProbingPrefix, probing.NewHandler()) // /raft/probing // ✅
+ return mux
+}
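+
+// Illustrative wiring sketch (not part of this file; lg, raftNode, serverStats,
+// leaderStats and msgs are hypothetical): a caller fills in the exported fields,
+// calls Start, serves Handler() on the peer listener, adds peers, and then hands
+// raft messages to Send.
+//
+//	tr := &Transport{
+//		Logger:      lg,
+//		ID:          types.ID(1),
+//		ClusterID:   types.ID(0x1000),
+//		Raft:        raftNode, // implements the Raft interface above
+//		ServerStats: serverStats,
+//		LeaderStats: leaderStats,
+//		ErrorC:      make(chan error, 1),
+//	}
+//	if err := tr.Start(); err != nil {
+//		panic(err)
+//	}
+//	go http.ListenAndServe(":2380", tr.Handler())
+//	tr.AddPeer(types.ID(2), []string{"http://127.0.0.1:2380"})
+//	tr.Send(msgs) // msgs is a []raftpb.Message produced by the local raft node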
+
+func (t *Transport) Get(id types.ID) Peer {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ return t.peers[id]
+}
+
+// Send ok
+func (t *Transport) Send(msgs []raftpb.Message) {
+ for _, m := range msgs {
+ if m.To == 0 {
+ // ignore intentionally dropped messages
+ continue
+ }
+ to := types.ID(m.To)
+
+ t.mu.RLock()
+ p, pok := t.peers[to]
+ g, rok := t.remotes[to]
+ t.mu.RUnlock()
+
+ if pok {
+ if m.Type == raftpb.MsgApp {
+ t.ServerStats.SendAppendReq(m.Size())
+ }
+ p.send(m)
+ continue
+ }
+ if rok {
+ g.send(m)
+ continue
+ }
+ if t.Logger != nil {
+ t.Logger.Debug(
+ "忽略消息发送请求;未知远程对等目标",
+ zap.String("type", m.Type.String()),
+ zap.String("unknown-target-peer-id", to.String()),
+ )
+ }
+ }
+}
+
+func (t *Transport) Stop() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ for _, r := range t.remotes {
+ r.stop()
+ }
+ for _, p := range t.peers {
+ p.stop()
+ }
+ t.pipelineProber.RemoveAll()
+ t.streamProber.RemoveAll()
+ if tr, ok := t.streamRt.(*http.Transport); ok {
+ tr.CloseIdleConnections()
+ }
+ if tr, ok := t.pipelineRt.(*http.Transport); ok {
+ tr.CloseIdleConnections()
+ }
+ t.peers = nil
+ t.remotes = nil
+}
+
+// CutPeer drops messages to the specified peer.
+func (t *Transport) CutPeer(id types.ID) {
+ t.mu.RLock()
+ p, pok := t.peers[id]
+ g, gok := t.remotes[id]
+ t.mu.RUnlock()
+
+ if pok {
+ p.(Pausable).Pause()
+ }
+ if gok {
+ g.Pause()
+ }
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (t *Transport) MendPeer(id types.ID) {
+ t.mu.RLock()
+ p, pok := t.peers[id]
+ g, gok := t.remotes[id]
+ t.mu.RUnlock()
+
+ if pok {
+ p.(Pausable).Resume()
+ }
+ if gok {
+ g.Resume()
+ }
+}
+
+// AddRemote adds a remote node.
+func (t *Transport) AddRemote(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.remotes == nil {
+ // TODO there's no clean way to shutdown the golang http server
+ // (see: https://github.com/golang/go/issues/4674) before
+ // stopping the transport; ignore any new connections.
+ return
+ }
+ if _, ok := t.peers[id]; ok {
+ return // already exists
+ }
+ if _, ok := t.remotes[id]; ok {
+ return // already exists
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("失败 NewURLs", zap.Strings("urls", us), zap.Error(err))
+ }
+ }
+ t.remotes[id] = startRemote(t, urls, id)
+
+ if t.Logger != nil {
+ t.Logger.Info("添加一个远端节点的通信地址",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("remote-peer-id", id.String()),
+ zap.Strings("remote-peer-urls", us),
+ )
+ }
+}
+
+// AddPeer adds a peer member.
+func (t *Transport) AddPeer(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.peers == nil {
+ panic("transport stopped")
+ }
+ if _, ok := t.peers[id]; ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ }
+ }
+ fs := t.LeaderStats.Follower(id.String())
+ t.peers[id] = startPeer(t, urls, id, fs)
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "added remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("remote-peer-id", id.String()),
+ zap.Strings("remote-peer-urls", us),
+ )
+ }
+}
+
+func (t *Transport) RemovePeer(id types.ID) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.removePeer(id)
+}
+
+func (t *Transport) RemoveAllPeers() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ for id := range t.peers {
+ t.removePeer(id)
+ }
+}
+
+// the caller of this function must have the peers mutex.
+func (t *Transport) removePeer(id types.ID) {
+ if peer, ok := t.peers[id]; ok {
+ peer.stop()
+ } else {
+ if t.Logger != nil {
+ t.Logger.Panic("unexpected removal of unknown remote peer", zap.String("remote-peer-id", id.String()))
+ }
+ }
+ delete(t.peers, id)
+ delete(t.LeaderStats.Followers, id.String())
+ t.pipelineProber.Remove(id.String())
+ t.streamProber.Remove(id.String())
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "removed remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ }
+}
+
+func (t *Transport) UpdatePeer(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ // TODO: return error or just panic?
+ if _, ok := t.peers[id]; !ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ }
+ }
+ t.peers[id].update(urls)
+
+ t.pipelineProber.Remove(id.String())
+ t.streamProber.Remove(id.String())
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "updated remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", us),
+ )
+ }
+}
+
+func (t *Transport) ActiveSince(id types.ID) time.Time {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if p, ok := t.peers[id]; ok {
+ return p.activeSince()
+ }
+ return time.Time{}
+}
+
+func (t *Transport) SendSnapshot(m snap.Message) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ p := t.peers[types.ID(m.To)]
+ if p == nil {
+ m.CloseWithError(errMemberNotFound)
+ return
+ }
+ p.sendSnap(m)
+}
+
+// Pausable is a testing interface for pausing transport traffic.
+type Pausable interface {
+ Pause()
+ Resume()
+}
+
+func (t *Transport) Pause() {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ p.(Pausable).Pause()
+ }
+}
+
+func (t *Transport) Resume() {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ p.(Pausable).Resume()
+ }
+}
+
+// ActivePeers returns the number of peers whose connection is currently active.
+func (t *Transport) ActivePeers() (cnt int) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ if !p.activeSince().IsZero() {
+ cnt++
+ }
+ }
+ return cnt
+}
diff --git a/server/etcdserver/api/rafthttp/urlpick.go b/etcd/etcdserver/api/rafthttp/urlpick.go
similarity index 96%
rename from server/etcdserver/api/rafthttp/urlpick.go
rename to etcd/etcdserver/api/rafthttp/urlpick.go
index fc6054a78ab..659aa3d675f 100644
--- a/server/etcdserver/api/rafthttp/urlpick.go
+++ b/etcd/etcdserver/api/rafthttp/urlpick.go
@@ -18,7 +18,7 @@ import (
"net/url"
"sync"
- "go.etcd.io/etcd/client/pkg/v3/types"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
)
type urlPicker struct {
diff --git a/etcd/etcdserver/api/rafthttp/util.go b/etcd/etcdserver/api/rafthttp/util.go
new file mode 100644
index 00000000000..36454886a4d
--- /dev/null
+++ b/etcd/etcdserver/api/rafthttp/util.go
@@ -0,0 +1,193 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+var (
+ errMemberRemoved = fmt.Errorf("成员已经从集群中移除")
+ errMemberNotFound = fmt.Errorf("成员没有找到")
+)
+
+// NewListener returns a listener for raft message transfer between peers.
+// It uses timeout listener to identify broken streams promptly.
+func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) {
+ return transport.NewListenerWithOpts(u.Host, u.Scheme, transport.WithTLSInfo(tlsinfo), transport.WithTimeout(ConnReadTimeout, ConnWriteTimeout))
+}
+
+// NewRoundTripper returns a roundTripper used to send requests to the rafthttp listener of remote peers.
+func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+ // It uses a timeout transport, paired with the remote timeout listeners. It does not set read/write
+ // timeouts because the request body may take a long time to write out before the response can be read.
+ return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0)
+}
+
+// newStreamRoundTripper returns a roundTripper used to send stream requests
+// to rafthttp listener of remote peers.
+// Read/write timeout is set for stream roundTripper to promptly
+// find out broken status, which minimizes the number of messages
+// sent on broken connection.
+func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+ return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout)
+}
+
+// createPostRequest creates an HTTP POST request that sends a raft message.
+func createPostRequest(lg *zap.Logger, u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request {
+ uu := u
+ uu.Path = path
+ req, err := http.NewRequest("POST", uu.String(), body)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("unexpected new request error", zap.Error(err))
+ }
+ }
+ req.Header.Set("Content-Type", ct)
+ req.Header.Set("X-Server-From", from.String())
+ req.Header.Set("X-Server-Version", version.Version)
+ req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
+ req.Header.Set("X-Etcd-Cluster-ID", cid.String())
+ setPeerURLsHeader(req, urls)
+
+ return req
+}
+
+// checkPostResponse checks the response of the HTTP POST request that sends
+// raft message.
+func checkPostResponse(lg *zap.Logger, resp *http.Response, body []byte, req *http.Request, to types.ID) error {
+ switch resp.StatusCode {
+ case http.StatusPreconditionFailed:
+ switch strings.TrimSuffix(string(body), "\n") {
+ case errIncompatibleVersion.Error():
+ if lg != nil {
+ lg.Error(
+ "request sent was ignored by peer",
+ zap.String("remote-peer-id", to.String()),
+ )
+ }
+ return errIncompatibleVersion
+ case errClusterIDMismatch.Error():
+ if lg != nil {
+ lg.Error(
+ "request sent was ignored due to cluster ID mismatch",
+ zap.String("remote-peer-id", to.String()),
+ zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")),
+ zap.String("local-member-cluster-id", req.Header.Get("X-Etcd-Cluster-ID")),
+ )
+ }
+ return errClusterIDMismatch
+ default:
+ return fmt.Errorf("unhandled error %q when precondition failed", string(body))
+ }
+ case http.StatusForbidden:
+ return errMemberRemoved
+ case http.StatusNoContent:
+ return nil
+ default:
+ return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String())
+ }
+}
+
+// reportCriticalError reports the given error through sending it into
+// the given error channel.
+// If the error channel is filled up when sending error, it drops the error
+// because the fact that error has happened is reported, which is
+// good enough.
+func reportCriticalError(err error, errc chan<- error) {
+ select {
+ case errc <- err:
+ default:
+ }
+}
+
+// setPeerURLsHeader reports local urls for peer discovery
+func setPeerURLsHeader(req *http.Request, urls types.URLs) {
+ if urls == nil {
+ // often not set in unit tests
+ return
+ }
+ peerURLs := make([]string, urls.Len())
+ for i := range urls {
+ peerURLs[i] = urls[i].String()
+ }
+ req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ","))
+}
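+
+// For illustration: with two local peer URLs the resulting header is a
+// comma-joined list, e.g.
+//
+//	X-PeerURLs: http://10.0.0.1:2380,http://10.0.0.2:2380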
+
+// ----------------------------------------- OVER ----------------------------------------------------
+
+// addRemoteFromRequest adds a remote peer based on the HTTP request headers.
+func addRemoteFromRequest(tr Transporter, r *http.Request) {
+ if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil {
+ if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+ tr.AddRemote(from, strings.Split(urls, ","))
+ }
+ }
+}
+
+func serverVersion(h http.Header) *semver.Version {
+ verStr := h.Get("X-Server-Version")
+ if verStr == "" {
+ verStr = "2.0.0"
+ }
+ return semver.Must(semver.NewVersion(verStr))
+}
+
+func minClusterVersion(h http.Header) *semver.Version {
+ verStr := h.Get("X-Min-Cluster-Version")
+ if verStr == "" {
+ verStr = "2.0.0"
+ }
+ return semver.Must(semver.NewVersion(verStr))
+}
+
+// compareMajorMinorVersion compares only the major and minor parts of two versions.
+func compareMajorMinorVersion(a, b *semver.Version) int {
+ na := &semver.Version{Major: a.Major, Minor: a.Minor}
+ nb := &semver.Version{Major: b.Major, Minor: b.Minor}
+ switch {
+ case na.LessThan(*nb):
+ return -1
+ case nb.LessThan(*na):
+ return 1
+ default:
+ return 0
+ }
+}
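+
+// For illustration (not part of this file): compareMajorMinorVersion ignores the
+// patch level, so 3.5.7 and 3.5.0 compare as equal while any 3.4.x sorts before 3.5.x.
+//
+//	a := semver.Must(semver.NewVersion("3.5.7"))
+//	b := semver.Must(semver.NewVersion("3.5.0"))
+//	c := semver.Must(semver.NewVersion("3.4.22"))
+//	compareMajorMinorVersion(a, b) // 0
+//	compareMajorMinorVersion(c, a) // -1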
+
+// checkVersionCompatibility checks whether the given versions are compatible with the local version.
+func checkVersionCompatibility(name string, server, minCluster *semver.Version) (localServer *semver.Version, localMinCluster *semver.Version, err error) {
+ localServer = semver.Must(semver.NewVersion(version.Version))
+ localMinCluster = semver.Must(semver.NewVersion(version.MinClusterVersion))
+ if compareMajorMinorVersion(server, localMinCluster) == -1 {
+ return localServer, localMinCluster, fmt.Errorf("远端版本太低: remote[%s]=%s, local=%s", name, server, localServer)
+ }
+ if compareMajorMinorVersion(minCluster, localServer) == 1 {
+ return localServer, localMinCluster, fmt.Errorf("本地版本太低: remote[%s]=%s, local=%s", name, server, localServer)
+ }
+ return localServer, localMinCluster, nil
+}
diff --git a/etcd/etcdserver/api/snap/doc.go b/etcd/etcdserver/api/snap/doc.go
new file mode 100644
index 00000000000..adad91d742c
--- /dev/null
+++ b/etcd/etcdserver/api/snap/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package snap handles Raft nodes' states with snapshots.
+// The snapshot logic is internal to the etcd server and raft package.
+package snap
diff --git a/server/etcdserver/api/snap/message.go b/etcd/etcdserver/api/snap/message.go
similarity index 88%
rename from server/etcdserver/api/snap/message.go
rename to etcd/etcdserver/api/snap/message.go
index 2b4090c981d..0da6c10a19c 100644
--- a/server/etcdserver/api/snap/message.go
+++ b/etcd/etcdserver/api/snap/message.go
@@ -17,18 +17,19 @@ package snap
import (
"io"
- "go.etcd.io/etcd/pkg/v3/ioutil"
- "go.etcd.io/raft/v3/raftpb"
+ "github.com/ls-2018/etcd_cn/pkg/ioutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
)
// Message is a struct that contains a raft Message and a ReadCloser. The type
-// of raft message MUST be MsgSnap, which contains the raft meta-data and an
+// of raft message must be MsgSnap, which contains the raft meta-data and an
// additional data []byte field that contains the snapshot of the actual state
// machine.
// Message contains the ReadCloser field for handling large snapshot. This avoid
// copying the entire snapshot into a byte array, which consumes a lot of memory.
//
// User of Message should close the Message after sending it.
+// Message is the abstraction for all messages and includes the fields required by each message type.
type Message struct {
raftpb.Message
ReadCloser io.ReadCloser
diff --git a/etcd/etcdserver/api/snap/over_db.go b/etcd/etcdserver/api/snap/over_db.go
new file mode 100644
index 00000000000..e3a26980a93
--- /dev/null
+++ b/etcd/etcdserver/api/snap/over_db.go
@@ -0,0 +1,92 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+)
+
+var ErrNoDBSnapshot = errors.New("snap: 快照文件不存在")
+
+// SaveDBFrom saves a database snapshot from the given reader. It guarantees the save operation is atomic.
+func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
+ f, err := ioutil.TempFile(s.dir, "tmp")
+ if err != nil {
+ return 0, err
+ }
+ var n int64
+ n, err = io.Copy(f, r)
+ if err == nil {
+ err = fileutil.Fsync(f)
+ }
+ f.Close()
+ if err != nil {
+ os.Remove(f.Name())
+ return n, err
+ }
+ fn := s.dbFilePath(id)
+ if fileutil.Exist(fn) {
+ os.Remove(f.Name())
+ return n, nil
+ }
+ err = os.Rename(f.Name(), fn)
+ if err != nil {
+ os.Remove(f.Name())
+ return n, err
+ }
+
+ s.lg.Info(
+ "保存快照到硬盘",
+ zap.String("path", fn),
+ zap.Int64("bytes", n),
+ zap.String("size", humanize.Bytes(uint64(n))),
+ )
+
+ return n, nil
+}
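+
+// How the atomicity guarantee above is achieved (explanatory note, assuming a
+// POSIX-style rename): the data is written to a temporary file in the same
+// directory, fsynced, and only then renamed to its final name, e.g.
+//
+//	s.dbFilePath(0x27f9) // "<snap-dir>/00000000000027f9.snap.db"
+//
+// so readers never observe a partially written .snap.db file.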
+
+// DBFilePath returns the file path of the database snapshot with the given id. It returns an error if the snapshot does not exist.
+func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
+ if _, err := fileutil.ReadDir(s.dir); err != nil {
+ return "", err
+ }
+ fn := s.dbFilePath(id)
+ if fileutil.Exist(fn) {
+ return fn, nil
+ }
+ if s.lg != nil {
+ s.lg.Warn(
+ "查找快照失败 [SNAPSHOT-INDEX].snap.db",
+ zap.Uint64("snapshot-index", id),
+ zap.String("snapshot-file-path", fn),
+ zap.Error(ErrNoDBSnapshot),
+ )
+ }
+ return "", ErrNoDBSnapshot
+}
+
+func (s *Snapshotter) dbFilePath(id uint64) string {
+ return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
+}
diff --git a/etcd/etcdserver/api/snap/snappb/over.go b/etcd/etcdserver/api/snap/snappb/over.go
new file mode 100644
index 00000000000..a048b69e0ca
--- /dev/null
+++ b/etcd/etcdserver/api/snap/snappb/over.go
@@ -0,0 +1,30 @@
+package snappb
+
+import (
+ "encoding/json"
+)
+
+type temp struct {
+ Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
+ Data string `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ t := temp{
+ Crc: m.Crc,
+ Data: string(m.Data),
+ }
+ return json.Marshal(t)
+}
+
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ // decode into the JSON-friendly temp form, then copy the fields into the protobuf struct
+ var t temp
+ if err := json.Unmarshal(dAtA, &t); err != nil {
+ return err
+ }
+ m.Crc = t.Crc
+ m.Data = []byte(t.Data)
+ return nil
+}
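+
+// Round-trip sketch (illustrative only): Marshal encodes the snapshot as JSON
+// such as {"crc":1234,"data":"..."}, and Unmarshal restores the same Crc/Data pair.
+//
+//	s := Snapshot{Crc: 1234, Data: []byte("payload")}
+//	b, _ := s.Marshal()
+//	var out Snapshot
+//	_ = out.Unmarshal(b) // out.Crc == 1234, string(out.Data) == "payload"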
diff --git a/etcd/etcdserver/api/snap/snappb/snap.pb.go b/etcd/etcdserver/api/snap/snappb/snap.pb.go
new file mode 100644
index 00000000000..ad51a8f9c20
--- /dev/null
+++ b/etcd/etcdserver/api/snap/snappb/snap.pb.go
@@ -0,0 +1,73 @@
+// Code generated by protoc-gen-gogo.
+// source: snap.proto
+
+package snappb
+
+import (
+ "encoding/json"
+ fmt "fmt"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+var (
+ _ = fmt.Errorf
+ _ = math.Inf
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Snapshot struct {
+ Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f2e3c045ebf84d00, []int{0}
+}
+
+func init() {
+ proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
+}
+
+func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) }
+
+var fileDescriptor_f2e3c045ebf84d00 = []byte{
+ // 126 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
+ 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
+ 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
+ 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
+ 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
+ 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1,
+ 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
+ 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00,
+}
+
+func (m *Snapshot) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func sovSnap(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+
+var (
+ ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupSnap = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/server/etcdserver/api/snap/snappb/snap.proto b/etcd/etcdserver/api/snap/snappb/snap.proto
similarity index 100%
rename from server/etcdserver/api/snap/snappb/snap.proto
rename to etcd/etcdserver/api/snap/snappb/snap.proto
diff --git a/etcd/etcdserver/api/snap/snapshotter.go b/etcd/etcdserver/api/snap/snapshotter.go
new file mode 100644
index 00000000000..9490a261f55
--- /dev/null
+++ b/etcd/etcdserver/api/snap/snapshotter.go
@@ -0,0 +1,296 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap/snappb"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ pioutil "github.com/ls-2018/etcd_cn/pkg/ioutil"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+ "go.uber.org/zap"
+)
+
+const snapSuffix = ".snap"
+
+var (
+ ErrNoSnapshot = errors.New("snap: no available snapshot")
+ ErrEmptySnapshot = errors.New("snap: empty snapshot")
+ ErrCRCMismatch = errors.New("snap: crc mismatch")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+ // a map of valid file names that may appear in the snap directory.
+ validFiles = map[string]bool{
+ "db": true,
+ }
+)
+
+// Snapshotter manages snapshot files on disk.
+type Snapshotter struct {
+ lg *zap.Logger
+ dir string
+}
+
+func New(lg *zap.Logger, dir string) *Snapshotter {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &Snapshotter{
+ lg: lg,
+ dir: dir,
+ }
+}
+
+func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
+ if raft.IsEmptySnap(snapshot) {
+ return nil
+ }
+ return s.save(&snapshot)
+}
+
+// save persists a single snapshot to disk.
+func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
+ fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
+ b := pbutil.MustMarshal(snapshot)
+ crc := crc32.Update(0, crcTable, b)
+ snap := snappb.Snapshot{Crc: crc, Data: b}
+ d, err := snap.Marshal()
+ if err != nil {
+ return err
+ }
+
+ spath := filepath.Join(s.dir, fname)
+ err = pioutil.WriteAndSyncFile(spath, d, 0o666)
+
+ if err != nil {
+ s.lg.Warn("写快照文件失败", zap.String("path", spath), zap.Error(err))
+ rerr := os.Remove(spath)
+ if rerr != nil {
+ s.lg.Warn("删除损坏的snap文件失败", zap.String("path", spath), zap.Error(err))
+ }
+ return err
+ }
+
+ return nil
+}
+
+ // Load returns the newest snapshot.
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(*raftpb.Snapshot) bool { return true })
+}
+
+ // LoadNewestAvailable loads the newest snapshot that is also recorded in the given WAL snapshot entries.
+func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(snapshot *raftpb.Snapshot) bool {
+ m := snapshot.Metadata
+ // iterate in reverse so the newest WAL snapshot entry is checked first
+ // accept only snapshots that exist on disk and are recorded in the WAL
+ for i := len(walSnaps) - 1; i >= 0; i-- {
+ if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+ // loadMatching returns the newest snapshot for which matchFn returns true.
+func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) {
+ names, err := s.snapNames() // list the snapshot files in the snapshot directory
+ if err != nil {
+ return nil, err
+ }
+ var snap *raftpb.Snapshot
+ for _, name := range names {
+ if snap, err = loadSnap(s.lg, s.dir, name); err == nil && matchFn(snap) {
+ return snap, nil
+ }
+ }
+ return nil, ErrNoSnapshot
+}
+
+ // loadSnap reads the named snapshot file; if it cannot be read, the file is renamed with a .broken suffix.
+func loadSnap(lg *zap.Logger, dir, name string) (*raftpb.Snapshot, error) {
+ fpath := filepath.Join(dir, name)
+ snap, err := Read(lg, fpath)
+ if err != nil {
+ brokenPath := fpath + ".broken"
+ if lg != nil {
+ lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err))
+ }
+ if rerr := os.Rename(fpath, brokenPath); rerr != nil {
+ if lg != nil {
+ lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr))
+ }
+ } else {
+ if lg != nil {
+ lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath))
+ }
+ }
+ }
+ return snap, err
+}
+
+// Read reads the snapshot named by snapname and returns the snapshot.
+func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) {
+ b, err := ioutil.ReadFile(snapname)
+ if err != nil {
+ if lg != nil {
+ lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err))
+ }
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ if lg != nil {
+ lg.Warn("failed to read empty snapshot file", zap.String("path", snapname))
+ }
+ return nil, ErrEmptySnapshot
+ }
+
+ var serializedSnap snappb.Snapshot
+ if err = serializedSnap.Unmarshal(b); err != nil {
+ if lg != nil {
+ lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err))
+ }
+ return nil, err
+ }
+
+ if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
+ if lg != nil {
+ lg.Warn("failed to read empty snapshot data", zap.String("path", snapname))
+ }
+ return nil, ErrEmptySnapshot
+ }
+
+ crc := crc32.Update(0, crcTable, serializedSnap.Data)
+ if crc != serializedSnap.Crc {
+ if lg != nil {
+ lg.Warn("snap file is corrupt",
+ zap.String("path", snapname),
+ zap.Uint32("prev-crc", serializedSnap.Crc),
+ zap.Uint32("new-crc", crc),
+ )
+ }
+ return nil, ErrCRCMismatch
+ }
+
+ var snap raftpb.Snapshot
+ if err = snap.Unmarshal(serializedSnap.Data); err != nil {
+ if lg != nil {
+ lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err))
+ }
+ return nil, err
+ }
+ return &snap, nil
+}
+
+ // snapNames returns the snapshot file names in logical time order (newest to oldest). It returns ErrNoSnapshot if no snapshot is available.
+func (s *Snapshotter) snapNames() ([]string, error) {
+ dir, err := os.Open(s.dir) // ./raftexample/db/raftexample-1-snap
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ filenames, err := s.cleanupSnapdir(names) // remove temporary snapshot files
+ if err != nil {
+ return nil, err
+ }
+ snaps := checkSuffix(s.lg, filenames)
+ if len(snaps) == 0 {
+ return nil, ErrNoSnapshot
+ }
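+ // filenames are zero-padded "term-index" hex strings, so a reverse lexical sort orders them newest first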
+ sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
+ return snaps, nil
+}
+
+ // checkSuffix keeps only the file names that end with the .snap suffix.
+func checkSuffix(lg *zap.Logger, names []string) []string {
+ snaps := []string{}
+ for i := range names {
+ if strings.HasSuffix(names[i], snapSuffix) { // ".snap"
+ snaps = append(snaps, names[i])
+ } else {
+ // warn only about files that are not in the validFiles allowlist
+ if _, ok := validFiles[names[i]]; !ok {
+ if lg != nil {
+ lg.Warn("发现了未期待的文件在快照目录下; 跳过", zap.String("path", names[i]))
+ }
+ }
+ }
+ }
+ return snaps
+}
+
+ // cleanupSnapdir removes temporary (orphaned defragmentation) files from the snapshot directory.
+func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) {
+ names = make([]string, 0, len(filenames))
+ for _, filename := range filenames {
+ if strings.HasPrefix(filename, "db.tmp") {
+ s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename))
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+ return names, fmt.Errorf("failed to remove orphaned .snap.db file %s: %v", filename, rmErr)
+ }
+ } else {
+ names = append(names, filename)
+ }
+ }
+ return names, nil
+}
+
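+// ReleaseSnapDBs removes any orphaned .snap.db files whose index is older than the given snapshot's index.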
+func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error {
+ dir, err := os.Open(s.dir)
+ if err != nil {
+ return err
+ }
+ defer dir.Close()
+ filenames, err := dir.Readdirnames(-1)
+ if err != nil {
+ return err
+ }
+ for _, filename := range filenames {
+ if strings.HasSuffix(filename, ".snap.db") {
+ hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db")
+ index, err := strconv.ParseUint(hexIndex, 16, 64)
+ if err != nil {
+ s.lg.Error("failed to parse index from filename", zap.String("path", filename), zap.String("error", err.Error()))
+ continue
+ }
+ if index < snap.Metadata.Index {
+ s.lg.Info("found orphaned .snap.db file; deleting", zap.String("path", filename))
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+ s.lg.Error("failed to remove orphaned .snap.db file", zap.String("path", filename), zap.String("error", rmErr.Error()))
+ }
+ }
+ }
+ }
+ return nil
+}
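+
+// Illustrative usage sketch (not part of the package; lg, snapdir and snapshot are assumed to exist):
+//
+//	ss := snap.New(lg, snapdir)
+//	if err := ss.SaveSnap(snapshot); err != nil {
+//		lg.Fatal("failed to save snapshot", zap.Error(err))
+//	}
+//	latest, err := ss.Load() // newest snapshot, or ErrNoSnapshot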
diff --git a/etcd/etcdserver/api/v2auth/auth.go b/etcd/etcdserver/api/v2auth/auth.go
new file mode 100644
index 00000000000..f2b7cdca56a
--- /dev/null
+++ b/etcd/etcdserver/api/v2auth/auth.go
@@ -0,0 +1,670 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2auth implements etcd authentication.
+package v2auth
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+)
+
+const (
+ // StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
+ StorePermsPrefix = "/2"
+
+ // RootRoleName is the name of the ROOT role, with privileges to manage the cluster.
+ RootRoleName = "root"
+
+ // GuestRoleName is the name of the role that defines the privileges of an unauthenticated user.
+ GuestRoleName = "guest"
+)
+
+var rootRole = Role{
+ Role: RootRoleName,
+ Permissions: Permissions{
+ KV: RWPermission{
+ Read: []string{"/*"},
+ Write: []string{"/*"},
+ },
+ },
+}
+
+var guestRole = Role{
+ Role: GuestRoleName,
+ Permissions: Permissions{
+ KV: RWPermission{
+ Read: []string{"/*"},
+ Write: []string{"/*"},
+ },
+ },
+}
+
+type doer interface {
+ Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error)
+}
+
+type Store interface {
+ AllUsers() ([]string, error)
+ GetUser(name string) (User, error)
+ CreateOrUpdateUser(user User) (out User, created bool, err error)
+ CreateUser(user User) (User, error)
+ DeleteUser(name string) error
+ UpdateUser(user User) (User, error)
+ AllRoles() ([]string, error)
+ GetRole(name string) (Role, error)
+ CreateRole(role Role) error
+ DeleteRole(name string) error
+ UpdateRole(role Role) (Role, error)
+ AuthEnabled() bool
+ EnableAuth() error
+ DisableAuth() error
+ PasswordStore
+}
+
+type PasswordStore interface {
+ CheckPassword(user User, password string) bool
+ HashPassword(password string) (string, error)
+}
+
+type store struct {
+ lg *zap.Logger
+ server doer
+ timeout time.Duration
+ ensuredOnce bool
+
+ PasswordStore
+}
+
+type User struct {
+ User string `json:"user"`
+ Password string `json:"password,omitempty"`
+ Roles []string `json:"roles"`
+ Grant []string `json:"grant,omitempty"`
+ Revoke []string `json:"revoke,omitempty"`
+}
+
+type Role struct {
+ Role string `json:"role"`
+ Permissions Permissions `json:"permissions"`
+ Grant *Permissions `json:"grant,omitempty"`
+ Revoke *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+ KV RWPermission `json:"kv"`
+}
+
+func (p *Permissions) IsEmpty() bool {
+ return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0)
+}
+
+type RWPermission struct {
+ Read []string `json:"read"`
+ Write []string `json:"write"`
+}
+
+type Error struct {
+ Status int
+ Errmsg string
+}
+
+func (ae Error) Error() string { return ae.Errmsg }
+func (ae Error) HTTPStatus() int { return ae.Status }
+
+func authErr(hs int, s string, v ...interface{}) Error {
+ return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)}
+}
+
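+// NewStore creates a v2 auth store that persists users and roles through the given server with the given request timeout.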
+func NewStore(lg *zap.Logger, server doer, timeout time.Duration) Store {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ s := &store{
+ lg: lg,
+ server: server,
+ timeout: timeout,
+ PasswordStore: passwordStore{},
+ }
+ return s
+}
+
+// passwordStore implements PasswordStore using bcrypt to hash user passwords
+type passwordStore struct{}
+
+func (passwordStore) CheckPassword(user User, password string) bool {
+ err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
+ return err == nil
+}
+
+func (passwordStore) HashPassword(password string) (string, error) {
+ hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+ return string(hash), err
+}
+
+func (s *store) AllUsers() ([]string, error) {
+ resp, err := s.requestResource("/users/", false)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return []string{}, nil
+ }
+ }
+ return nil, err
+ }
+ var nodes []string
+ for _, n := range resp.Event.NodeExtern.ExternNodes {
+ _, user := path.Split(n.Key)
+ nodes = append(nodes, user)
+ }
+ sort.Strings(nodes)
+ return nodes, nil
+}
+
+func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }
+
+// CreateOrUpdateUser should only be used for creating a new user or when it is not
+// known whether the operation is a create or an update. (When only a password is
+// passed in, we cannot tell whether it is an update or a create.)
+func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
+ _, err = s.getUser(user.User, true)
+ if err == nil {
+ out, err = s.UpdateUser(user)
+ return out, false, err
+ }
+ u, err := s.CreateUser(user)
+ return u, true, err
+}
+
+func (s *store) CreateUser(user User) (User, error) {
+ // Attach root role to root user.
+ if user.User == "root" {
+ user = attachRootRole(user)
+ }
+ u, err := s.createUserInternal(user)
+ if err == nil {
+ s.lg.Info("created a user", zap.String("user-name", user.User))
+ }
+ return u, err
+}
+
+func (s *store) createUserInternal(user User) (User, error) {
+ if user.Password == "" {
+ return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User)
+ }
+ hash, err := s.HashPassword(user.Password)
+ if err != nil {
+ return user, err
+ }
+ user.Password = hash
+
+ _, err = s.createResource("/users/"+user.User, user)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ return user, authErr(http.StatusConflict, "User %s already exists.", user.User)
+ }
+ }
+ }
+ return user, err
+}
+
+func (s *store) DeleteUser(name string) error {
+ if s.AuthEnabled() && name == "root" {
+ return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.")
+ }
+ err := s.deleteResource("/users/" + name)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return authErr(http.StatusNotFound, "User %s does not exist", name)
+ }
+ }
+ return err
+ }
+ s.lg.Info("deleted a user", zap.String("user-name", name))
+ return nil
+}
+
+func (s *store) UpdateUser(user User) (User, error) {
+ old, err := s.getUser(user.User, true)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User)
+ }
+ }
+ return old, err
+ }
+
+ newUser, err := old.merge(s.lg, user, s.PasswordStore)
+ if err != nil {
+ return old, err
+ }
+ if reflect.DeepEqual(old, newUser) {
+ return old, authErr(http.StatusBadRequest, "User not updated. Use grant/revoke/password to update the user.")
+ }
+ _, err = s.updateResource("/users/"+user.User, newUser)
+ if err == nil {
+ s.lg.Info("updated a user", zap.String("user-name", user.User))
+ }
+ return newUser, err
+}
+
+func (s *store) AllRoles() ([]string, error) {
+ nodes := []string{RootRoleName}
+ resp, err := s.requestResource("/roles/", false)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return nodes, nil
+ }
+ }
+ return nil, err
+ }
+ for _, n := range resp.Event.NodeExtern.ExternNodes {
+ _, role := path.Split(n.Key)
+ nodes = append(nodes, role)
+ }
+ sort.Strings(nodes)
+ return nodes, nil
+}
+
+func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }
+
+func (s *store) CreateRole(role Role) error {
+ if role.Role == RootRoleName {
+ return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
+ }
+ _, err := s.createResource("/roles/"+role.Role, role)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ return authErr(http.StatusConflict, "Role %s already exists.", role.Role)
+ }
+ }
+ }
+ if err == nil {
+ s.lg.Info("created a new role", zap.String("role-name", role.Role))
+ }
+ return err
+}
+
+func (s *store) DeleteRole(name string) error {
+ if name == RootRoleName {
+ return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name)
+ }
+ err := s.deleteResource("/roles/" + name)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return authErr(http.StatusNotFound, "Role %s doesn't exist.", name)
+ }
+ }
+ }
+ if err == nil {
+ s.lg.Info("delete a new role", zap.String("role-name", name))
+ }
+ return err
+}
+
+func (s *store) UpdateRole(role Role) (Role, error) {
+ if role.Role == RootRoleName {
+ return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
+ }
+ old, err := s.getRole(role.Role, true)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role)
+ }
+ }
+ return old, err
+ }
+ newRole, err := old.merge(s.lg, role)
+ if err != nil {
+ return old, err
+ }
+ if reflect.DeepEqual(old, newRole) {
+ return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.")
+ }
+ _, err = s.updateResource("/roles/"+role.Role, newRole)
+ if err == nil {
+ s.lg.Info("updated a new role", zap.String("role-name", role.Role))
+ }
+ return newRole, err
+}
+
+func (s *store) AuthEnabled() bool {
+ return s.detectAuth()
+}
+
+func (s *store) EnableAuth() error {
+ if s.AuthEnabled() {
+ return authErr(http.StatusConflict, "already enabled")
+ }
+
+ if _, err := s.getUser("root", true); err != nil {
+ return authErr(http.StatusConflict, "No root user available, please create one")
+ }
+ if _, err := s.getRole(GuestRoleName, true); err != nil {
+ s.lg.Info(
+ "no guest role access found; creating default",
+ zap.String("role-name", GuestRoleName),
+ )
+ if err := s.CreateRole(guestRole); err != nil {
+ s.lg.Warn(
+ "failed to create a guest role; aborting auth enable",
+ zap.String("role-name", GuestRoleName),
+ zap.Error(err),
+ )
+ return err
+ }
+ }
+
+ if err := s.enableAuth(); err != nil {
+ s.lg.Warn("failed to enable auth", zap.Error(err))
+ return err
+ }
+
+ s.lg.Info("enabled auth")
+ return nil
+}
+
+func (s *store) DisableAuth() error {
+ if !s.AuthEnabled() {
+ return authErr(http.StatusConflict, "already disabled")
+ }
+
+ err := s.disableAuth()
+ if err == nil {
+ s.lg.Info("disabled auth")
+ } else {
+ s.lg.Warn("failed to disable auth", zap.Error(err))
+ }
+ return err
+}
+
+// merge applies the properties of the passed-in User to the User on which it
+// is called and returns a new User with these modifications applied. Think of
+// all Users as immutable sets of data. Merge allows you to perform the set
+// operations (desired grants and revokes) atomically
+func (ou User) merge(lg *zap.Logger, nu User, s PasswordStore) (User, error) {
+ var out User
+ if ou.User != nu.User {
+ return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
+ }
+ out.User = ou.User
+ if nu.Password != "" {
+ hash, err := s.HashPassword(nu.Password)
+ if err != nil {
+ return ou, err
+ }
+ out.Password = hash
+ } else {
+ out.Password = ou.Password
+ }
+ currentRoles := types.NewUnsafeSet(ou.Roles...)
+ for _, g := range nu.Grant {
+ if currentRoles.Contains(g) {
+ lg.Warn(
+ "attempted to grant a duplicate role for a user",
+ zap.String("user-name", nu.User),
+ zap.String("role-name", g),
+ )
+ return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
+ }
+ currentRoles.Add(g)
+ }
+ for _, r := range nu.Revoke {
+ if !currentRoles.Contains(r) {
+ lg.Warn(
+ "attempted to revoke a ungranted role for a user",
+ zap.String("user-name", nu.User),
+ zap.String("role-name", r),
+ )
+ return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
+ }
+ currentRoles.Remove(r)
+ }
+ out.Roles = currentRoles.Values()
+ sort.Strings(out.Roles)
+ return out, nil
+}
+
+// merge for a role works the same as User above -- atomic Role application to
+// each of the substructures.
+func (r Role) merge(lg *zap.Logger, n Role) (Role, error) {
+ var out Role
+ var err error
+ if r.Role != n.Role {
+ return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
+ }
+ out.Role = r.Role
+ out.Permissions, err = r.Permissions.Grant(n.Grant)
+ if err != nil {
+ return out, err
+ }
+ out.Permissions, err = out.Permissions.Revoke(lg, n.Revoke)
+ return out, err
+}
+
+func (r Role) HasKeyAccess(key string, write bool) bool {
+ if r.Role == RootRoleName {
+ return true
+ }
+ return r.Permissions.KV.HasAccess(key, write)
+}
+
+func (r Role) HasRecursiveAccess(key string, write bool) bool {
+ if r.Role == RootRoleName {
+ return true
+ }
+ return r.Permissions.KV.HasRecursiveAccess(key, write)
+}
+
+// Grant adds a set of permissions to the permission object on which it is called,
+// returning a new permission object.
+func (p Permissions) Grant(n *Permissions) (Permissions, error) {
+ var out Permissions
+ var err error
+ if n == nil {
+ return p, nil
+ }
+ out.KV, err = p.KV.Grant(n.KV)
+ return out, err
+}
+
+// Revoke removes a set of permissions from the permission object on which it is called,
+// returning a new permission object.
+func (p Permissions) Revoke(lg *zap.Logger, n *Permissions) (Permissions, error) {
+ var out Permissions
+ var err error
+ if n == nil {
+ return p, nil
+ }
+ out.KV, err = p.KV.Revoke(lg, n.KV)
+ return out, err
+}
+
+// Grant adds a set of permissions to the permission object on which it is called,
+// returning a new permission object.
+func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
+ var out RWPermission
+ currentRead := types.NewUnsafeSet(rw.Read...)
+ for _, r := range n.Read {
+ if currentRead.Contains(r) {
+ return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
+ }
+ currentRead.Add(r)
+ }
+ currentWrite := types.NewUnsafeSet(rw.Write...)
+ for _, w := range n.Write {
+ if currentWrite.Contains(w) {
+ return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w)
+ }
+ currentWrite.Add(w)
+ }
+ out.Read = currentRead.Values()
+ out.Write = currentWrite.Values()
+ sort.Strings(out.Read)
+ sort.Strings(out.Write)
+ return out, nil
+}
+
+// Revoke removes a set of permissions from the permission object on which it is called,
+// returning a new permission object.
+func (rw RWPermission) Revoke(lg *zap.Logger, n RWPermission) (RWPermission, error) {
+ var out RWPermission
+ currentRead := types.NewUnsafeSet(rw.Read...)
+ for _, r := range n.Read {
+ if !currentRead.Contains(r) {
+ lg.Info(
+ "revoking ungranted read permission",
+ zap.String("read-permission", r),
+ )
+ continue
+ }
+ currentRead.Remove(r)
+ }
+ currentWrite := types.NewUnsafeSet(rw.Write...)
+ for _, w := range n.Write {
+ if !currentWrite.Contains(w) {
+ lg.Info(
+ "revoking ungranted write permission",
+ zap.String("write-permission", w),
+ )
+ continue
+ }
+ currentWrite.Remove(w)
+ }
+ out.Read = currentRead.Values()
+ out.Write = currentWrite.Values()
+ sort.Strings(out.Read)
+ sort.Strings(out.Write)
+ return out, nil
+}
+
+func (rw RWPermission) HasAccess(key string, write bool) bool {
+ var list []string
+ if write {
+ list = rw.Write
+ } else {
+ list = rw.Read
+ }
+ for _, pat := range list {
+ match, err := simpleMatch(pat, key)
+ if err == nil && match {
+ return true
+ }
+ }
+ return false
+}
+
+func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool {
+ list := rw.Read
+ if write {
+ list = rw.Write
+ }
+ for _, pat := range list {
+ match, err := prefixMatch(pat, key)
+ if err == nil && match {
+ return true
+ }
+ }
+ return false
+}
+
+func simpleMatch(pattern string, key string) (match bool, err error) {
+ if pattern[len(pattern)-1] == '*' {
+ return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
+ }
+ return key == pattern, nil
+}
+
+func prefixMatch(pattern string, key string) (match bool, err error) {
+ if pattern[len(pattern)-1] != '*' {
+ return false, nil
+ }
+ return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
+}
+
+func attachRootRole(u User) User {
+ inRoles := false
+ for _, r := range u.Roles {
+ if r == RootRoleName {
+ inRoles = true
+ break
+ }
+ }
+ if !inRoles {
+ u.Roles = append(u.Roles, RootRoleName)
+ }
+ return u
+}
+
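+// getUser fetches a single user record; quorum selects a linearizable (QGET) read instead of a local read.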
+func (s *store) getUser(name string, quorum bool) (User, error) {
+ resp, err := s.requestResource("/users/"+name, quorum)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
+ }
+ }
+ return User{}, err
+ }
+ var u User
+ err = json.Unmarshal([]byte(*resp.Event.NodeExtern.Value), &u)
+ if err != nil {
+ return u, err
+ }
+ // Attach root role to root user.
+ if u.User == "root" {
+ u = attachRootRole(u)
+ }
+ return u, nil
+}
+
+func (s *store) getRole(name string, quorum bool) (Role, error) {
+ if name == RootRoleName {
+ return rootRole, nil
+ }
+ resp, err := s.requestResource("/roles/"+name, quorum)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
+ }
+ }
+ return Role{}, err
+ }
+ var r Role
+ err = json.Unmarshal([]byte(*resp.Event.NodeExtern.Value), &r)
+ return r, err
+}
diff --git a/etcd/etcdserver/api/v2auth/auth_requests.go b/etcd/etcdserver/api/v2auth/auth_requests.go
new file mode 100644
index 00000000000..a49d9dc968e
--- /dev/null
+++ b/etcd/etcdserver/api/v2auth/auth_requests.go
@@ -0,0 +1,180 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2auth
+
+import (
+ "context"
+ "encoding/json"
+ "path"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+)
+
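+// ensureAuthDirectories lazily creates the /users/ and /roles/ directories and the "enabled" flag under StorePermsPrefix the first time a write is attempted.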
+func (s *store) ensureAuthDirectories() error {
+ if s.ensuredOnce {
+ return nil
+ }
+ for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ pe := false
+ rr := etcdserverpb.Request{
+ Method: "PUT",
+ Path: res,
+ Dir: true,
+ PrevExist: &pe,
+ }
+ _, err := s.server.Do(ctx, rr)
+ cancel()
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ continue
+ }
+ }
+ s.lg.Warn(
+ "failed to create auth directories",
+ zap.Error(err),
+ )
+ return err
+ }
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ pe := false
+ rr := etcdserverpb.Request{
+ Method: "PUT",
+ Path: StorePermsPrefix + "/enabled",
+ Val: "false",
+ PrevExist: &pe,
+ }
+ _, err := s.server.Do(ctx, rr)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ s.ensuredOnce = true
+ return nil
+ }
+ }
+ return err
+ }
+ s.ensuredOnce = true
+ return nil
+}
+
+func (s *store) enableAuth() error {
+ _, err := s.updateResource("/enabled", true)
+ return err
+}
+
+func (s *store) disableAuth() error {
+ _, err := s.updateResource("/enabled", false)
+ return err
+}
+
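+// detectAuth reads the "enabled" flag; a missing key or any other error is reported as auth being disabled.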
+func (s *store) detectAuth() bool {
+ if s.server == nil {
+ return false
+ }
+ value, err := s.requestResource("/enabled", false)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return false
+ }
+ }
+ s.lg.Warn(
+ "failed to detect auth settings",
+ zap.Error(err),
+ )
+ return false
+ }
+
+ var u bool
+ err = json.Unmarshal([]byte(*value.Event.NodeExtern.Value), &u)
+ if err != nil {
+ s.lg.Warn(
+ "internal bookkeeping value for enabled isn't valid JSON",
+ zap.Error(err),
+ )
+ return false
+ }
+ return u
+}
+
+func (s *store) requestResource(res string, quorum bool) (etcdserver.Response, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ p := path.Join(StorePermsPrefix, res)
+ method := "GET"
+ if quorum {
+ method = "QGET"
+ }
+ rr := etcdserverpb.Request{
+ Method: method,
+ Path: p,
+ Dir: false, // TODO: always false?
+ }
+ return s.server.Do(ctx, rr)
+}
+
+func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) {
+ return s.setResource(res, value, true)
+}
+
+func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) {
+ return s.setResource(res, value, false)
+}
+
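+// setResource JSON-encodes value and PUTs it under StorePermsPrefix; prevexist=true gives update semantics, prevexist=false gives create semantics.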
+func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) {
+ err := s.ensureAuthDirectories()
+ if err != nil {
+ return etcdserver.Response{}, err
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ data, err := json.Marshal(value)
+ if err != nil {
+ return etcdserver.Response{}, err
+ }
+ p := path.Join(StorePermsPrefix, res)
+ rr := etcdserverpb.Request{
+ Method: "PUT",
+ Path: p,
+ Val: string(data),
+ PrevExist: &prevexist,
+ }
+ return s.server.Do(ctx, rr)
+}
+
+func (s *store) deleteResource(res string) error {
+ err := s.ensureAuthDirectories()
+ if err != nil {
+ return err
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ pex := true
+ p := path.Join(StorePermsPrefix, res)
+ _, err = s.server.Do(ctx, etcdserverpb.Request{
+ Method: "DELETE",
+ Path: p,
+ PrevExist: &pex,
+ })
+ return err
+}
diff --git a/etcd/etcdserver/api/v2auth/auth_test.go b/etcd/etcdserver/api/v2auth/auth_test.go
new file mode 100644
index 00000000000..5bad22d12a1
--- /dev/null
+++ b/etcd/etcdserver/api/v2auth/auth_test.go
@@ -0,0 +1,677 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2auth
+
+import (
+ "context"
+ "reflect"
+ "testing"
+ "time"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+
+ "go.uber.org/zap"
+)
+
+type fakeDoer struct{}
+
+func (fakeDoer) Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error) {
+ return etcdserver.Response{}, nil
+}
+
+func TestCheckPassword(t *testing.T) {
+ st := NewStore(zap.NewExample(), fakeDoer{}, 5*time.Second)
+ u := User{Password: "$2a$10$I3iddh1D..EIOXXQtsra4u8AjOtgEa2ERxVvYGfXFBJDo1omXwP.q"}
+ matched := st.CheckPassword(u, "foo")
+ if matched {
+ t.Fatalf("expected false, got %v", matched)
+ }
+}
+
+const testTimeout = time.Millisecond
+
+func TestMergeUser(t *testing.T) {
+ tbl := []struct {
+ input User
+ merge User
+ expect User
+ iserr bool
+ }{
+ {
+ User{User: "foo"},
+ User{User: "bar"},
+ User{},
+ true,
+ },
+ {
+ User{User: "foo"},
+ User{User: "foo"},
+ User{User: "foo", Roles: []string{}},
+ false,
+ },
+ {
+ User{User: "foo"},
+ User{User: "foo", Grant: []string{"role1"}},
+ User{User: "foo", Roles: []string{"role1"}},
+ false,
+ },
+ {
+ User{User: "foo", Roles: []string{"role1"}},
+ User{User: "foo", Grant: []string{"role1"}},
+ User{},
+ true,
+ },
+ {
+ User{User: "foo", Roles: []string{"role1"}},
+ User{User: "foo", Revoke: []string{"role2"}},
+ User{},
+ true,
+ },
+ {
+ User{User: "foo", Roles: []string{"role1"}},
+ User{User: "foo", Grant: []string{"role2"}},
+ User{User: "foo", Roles: []string{"role1", "role2"}},
+ false,
+ },
+ { // empty password will not overwrite the previous password
+ User{User: "foo", Password: "foo", Roles: []string{}},
+ User{User: "foo", Password: ""},
+ User{User: "foo", Password: "foo", Roles: []string{}},
+ false,
+ },
+ }
+
+ for i, tt := range tbl {
+ out, err := tt.input.merge(zap.NewExample(), tt.merge, passwordStore{})
+ if err != nil && !tt.iserr {
+ t.Fatalf("Got unexpected error on item %d", i)
+ }
+ if !tt.iserr {
+ if !reflect.DeepEqual(out, tt.expect) {
+ t.Errorf("Unequal merge expectation on item %d: got: %#v, expect: %#v", i, out, tt.expect)
+ }
+ }
+ }
+}
+
+func TestMergeRole(t *testing.T) {
+ tbl := []struct {
+ input Role
+ merge Role
+ expect Role
+ iserr bool
+ }{
+ {
+ Role{Role: "foo"},
+ Role{Role: "bar"},
+ Role{},
+ true,
+ },
+ {
+ Role{Role: "foo"},
+ Role{Role: "foo", Grant: &Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}},
+ Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}},
+ false,
+ },
+ {
+ Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}},
+ Role{Role: "foo", Revoke: &Permissions{KV: RWPermission{Read: []string{"/foodir"}, Write: []string{"/foodir"}}}},
+ Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{}, Write: []string{}}}},
+ false,
+ },
+ {
+ Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/bardir"}}}},
+ Role{Role: "foo", Revoke: &Permissions{KV: RWPermission{Read: []string{"/foodir"}}}},
+ Role{},
+ true,
+ },
+ }
+ for i, tt := range tbl {
+ out, err := tt.input.merge(zap.NewExample(), tt.merge)
+ if err != nil && !tt.iserr {
+ t.Fatalf("Got unexpected error on item %d", i)
+ }
+ if !tt.iserr {
+ if !reflect.DeepEqual(out, tt.expect) {
+ t.Errorf("Unequal merge expectation on item %d: got: %#v, expect: %#v", i, out, tt.expect)
+ }
+ }
+ }
+}
+
+type testDoer struct {
+ get []etcdserver.Response
+ put []etcdserver.Response
+ getindex int
+ putindex int
+ explicitlyEnabled bool
+}
+
+func (td *testDoer) Do(_ context.Context, req etcdserverpb.Request) (etcdserver.Response, error) {
+ if td.explicitlyEnabled && (req.Path == StorePermsPrefix+"/enabled") {
+ t := "true"
+ return etcdserver.Response{
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/users/cat",
+ Value: &t,
+ },
+ },
+ }, nil
+ }
+ if (req.Method == "GET" || req.Method == "QGET") && td.get != nil {
+ res := td.get[td.getindex]
+ if res.Event == nil {
+ td.getindex++
+ return etcdserver.Response{}, &v2error.Error{
+ ErrorCode: v2error.EcodeKeyNotFound,
+ }
+ }
+ td.getindex++
+ return res, nil
+ }
+ if req.Method == "PUT" && td.put != nil {
+ res := td.put[td.putindex]
+ if res.Event == nil {
+ td.putindex++
+ return etcdserver.Response{}, &v2error.Error{
+ ErrorCode: v2error.EcodeNodeExist,
+ }
+ }
+ td.putindex++
+ return res, nil
+ }
+ return etcdserver.Response{}, nil
+}
+
+func TestAllUsers(t *testing.T) {
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Nodes: v2store.NodeExterns([]*v2store.NodeExtern{
+ {
+ Key: StorePermsPrefix + "/users/cat",
+ },
+ {
+ Key: StorePermsPrefix + "/users/dog",
+ },
+ }),
+ },
+ },
+ },
+ },
+ }
+ expected := []string{"cat", "dog"}
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false}
+ users, err := s.AllUsers()
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ if !reflect.DeepEqual(users, expected) {
+ t.Error("AllUsers doesn't match given store. Got", users, "expected", expected)
+ }
+}
+
+func TestGetAndDeleteUser(t *testing.T) {
+ data := `{"user": "cat", "roles" : ["animal"]}`
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/users/cat",
+ Value: &data,
+ },
+ },
+ },
+ },
+ explicitlyEnabled: true,
+ }
+ expected := User{User: "cat", Roles: []string{"animal"}}
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false}
+ out, err := s.GetUser("cat")
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ if !reflect.DeepEqual(out, expected) {
+ t.Error("GetUser doesn't match given store. Got", out, "expected", expected)
+ }
+ err = s.DeleteUser("cat")
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+}
+
+func TestAllRoles(t *testing.T) {
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Nodes: v2store.NodeExterns([]*v2store.NodeExtern{
+ {
+ Key: StorePermsPrefix + "/roles/animal",
+ },
+ {
+ Key: StorePermsPrefix + "/roles/human",
+ },
+ }),
+ },
+ },
+ },
+ },
+ explicitlyEnabled: true,
+ }
+ expected := []string{"animal", "human", "root"}
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false}
+ out, err := s.AllRoles()
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ if !reflect.DeepEqual(out, expected) {
+ t.Error("AllRoles doesn't match given store. Got", out, "expected", expected)
+ }
+}
+
+func TestGetAndDeleteRole(t *testing.T) {
+ data := `{"role": "animal"}`
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/roles/animal",
+ Value: &data,
+ },
+ },
+ },
+ },
+ explicitlyEnabled: true,
+ }
+ expected := Role{Role: "animal"}
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false}
+ out, err := s.GetRole("animal")
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ if !reflect.DeepEqual(out, expected) {
+ t.Error("GetRole doesn't match given store. Got", out, "expected", expected)
+ }
+ err = s.DeleteRole("animal")
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+}
+
+func TestEnsure(t *testing.T) {
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Set,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix,
+ Dir: true,
+ },
+ },
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Set,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/users/",
+ Dir: true,
+ },
+ },
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Set,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/roles/",
+ Dir: true,
+ },
+ },
+ },
+ },
+ }
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: false}
+ err := s.ensureAuthDirectories()
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+}
+
+type fastPasswordStore struct{}
+
+func (fastPasswordStore) CheckPassword(user User, password string) bool {
+ return user.Password == password
+}
+
+func (fastPasswordStore) HashPassword(password string) (string, error) { return password, nil }
+
+func TestCreateAndUpdateUser(t *testing.T) {
+ olduser := `{"user": "cat", "roles" : ["animal"]}`
+ newuser := `{"user": "cat", "roles" : ["animal", "pet"]}`
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: nil,
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/users/cat",
+ Value: &olduser,
+ },
+ },
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/users/cat",
+ Value: &olduser,
+ },
+ },
+ },
+ },
+ put: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Update,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/users/cat",
+ Value: &olduser,
+ },
+ },
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Update,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/users/cat",
+ Value: &newuser,
+ },
+ },
+ },
+ },
+ explicitlyEnabled: true,
+ }
+ user := User{User: "cat", Password: "meow", Roles: []string{"animal"}}
+ update := User{User: "cat", Grant: []string{"pet"}}
+ expected := User{User: "cat", Roles: []string{"animal", "pet"}}
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true, PasswordStore: fastPasswordStore{}}
+ out, created, err := s.CreateOrUpdateUser(user)
+ if !created {
+ t.Error("Should have created user, instead updated?")
+ }
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ out.Password = "meow"
+ if !reflect.DeepEqual(out, user) {
+ t.Error("UpdateUser doesn't match given update. Got", out, "expected", expected)
+ }
+ out, created, err = s.CreateOrUpdateUser(update)
+ if created {
+ t.Error("Should have updated user, instead created?")
+ }
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ if !reflect.DeepEqual(out, expected) {
+ t.Error("UpdateUser doesn't match given update. Got", out, "expected", expected)
+ }
+}
+
+func TestUpdateRole(t *testing.T) {
+ oldrole := `{"role": "animal", "permissions" : {"kv": {"read": ["/animal"], "write": []}}}`
+ newrole := `{"role": "animal", "permissions" : {"kv": {"read": ["/animal"], "write": ["/animal"]}}}`
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/roles/animal",
+ Value: &oldrole,
+ },
+ },
+ },
+ },
+ put: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Update,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/roles/animal",
+ Value: &newrole,
+ },
+ },
+ },
+ },
+ explicitlyEnabled: true,
+ }
+ update := Role{Role: "animal", Grant: &Permissions{KV: RWPermission{Read: []string{}, Write: []string{"/animal"}}}}
+ expected := Role{Role: "animal", Permissions: Permissions{KV: RWPermission{Read: []string{"/animal"}, Write: []string{"/animal"}}}}
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true}
+ out, err := s.UpdateRole(update)
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ if !reflect.DeepEqual(out, expected) {
+ t.Error("UpdateRole doesn't match given update. Got", out, "expected", expected)
+ }
+}
+
+func TestCreateRole(t *testing.T) {
+ role := `{"role": "animal", "permissions" : {"kv": {"read": ["/animal"], "write": []}}}`
+ d := &testDoer{
+ put: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Create,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/roles/animal",
+ Value: &role,
+ },
+ },
+ },
+ {
+ Event: nil,
+ },
+ },
+ explicitlyEnabled: true,
+ }
+ r := Role{Role: "animal", Permissions: Permissions{KV: RWPermission{Read: []string{"/animal"}, Write: []string{}}}}
+
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true}
+ err := s.CreateRole(Role{Role: "root"})
+ if err == nil {
+ t.Error("Should error creating root role")
+ }
+ err = s.CreateRole(r)
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+ err = s.CreateRole(r)
+ if err == nil {
+ t.Error("Creating duplicate role, should error")
+ }
+}
+
+func TestEnableAuth(t *testing.T) {
+ rootUser := `{"user": "root", "password": ""}`
+ guestRole := `{"role": "guest", "permissions" : {"kv": {"read": ["*"], "write": ["*"]}}}`
+ trueval := "true"
+ falseval := "false"
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/enabled",
+ Value: &falseval,
+ },
+ },
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/user/root",
+ Value: &rootUser,
+ },
+ },
+ },
+ {
+ Event: nil,
+ },
+ },
+ put: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Create,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/roles/guest",
+ Value: &guestRole,
+ },
+ },
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Update,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/enabled",
+ Value: &trueval,
+ },
+ },
+ },
+ },
+ explicitlyEnabled: false,
+ }
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true}
+ err := s.EnableAuth()
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+}
+
+func TestDisableAuth(t *testing.T) {
+ trueval := "true"
+ falseval := "false"
+ d := &testDoer{
+ get: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/enabled",
+ Value: &falseval,
+ },
+ },
+ },
+ {
+ Event: &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/enabled",
+ Value: &trueval,
+ },
+ },
+ },
+ },
+ put: []etcdserver.Response{
+ {
+ Event: &v2store.Event{
+ Action: v2store.Update,
+ Node: &v2store.NodeExtern{
+ Key: StorePermsPrefix + "/enabled",
+ Value: &falseval,
+ },
+ },
+ },
+ },
+ explicitlyEnabled: false,
+ }
+ s := store{lg: zap.NewExample(), server: d, timeout: testTimeout, ensuredOnce: true}
+ err := s.DisableAuth()
+ if err == nil {
+ t.Error("Expected error; already disabled")
+ }
+
+ err = s.DisableAuth()
+ if err != nil {
+ t.Error("Unexpected error", err)
+ }
+}
+
+func TestSimpleMatch(t *testing.T) {
+ role := Role{Role: "foo", Permissions: Permissions{KV: RWPermission{Read: []string{"/foodir/*", "/fookey"}, Write: []string{"/bardir/*", "/barkey"}}}}
+ if !role.HasKeyAccess("/foodir/foo/bar", false) {
+ t.Fatal("role lacks expected access")
+ }
+ if !role.HasKeyAccess("/fookey", false) {
+ t.Fatal("role lacks expected access")
+ }
+ if !role.HasRecursiveAccess("/foodir/*", false) {
+ t.Fatal("role lacks expected access")
+ }
+ if !role.HasRecursiveAccess("/foodir/foo*", false) {
+ t.Fatal("role lacks expected access")
+ }
+ if !role.HasRecursiveAccess("/bardir/*", true) {
+ t.Fatal("role lacks expected access")
+ }
+ if !role.HasKeyAccess("/bardir/bar/foo", true) {
+ t.Fatal("role lacks expected access")
+ }
+ if !role.HasKeyAccess("/barkey", true) {
+ t.Fatal("role lacks expected access")
+ }
+
+ if role.HasKeyAccess("/bardir/bar/foo", false) {
+ t.Fatal("role has unexpected access")
+ }
+ if role.HasKeyAccess("/barkey", false) {
+ t.Fatal("role has unexpected access")
+ }
+ if role.HasKeyAccess("/foodir/foo/bar", true) {
+ t.Fatal("role has unexpected access")
+ }
+ if role.HasKeyAccess("/fookey", true) {
+ t.Fatal("role has unexpected access")
+ }
+}
diff --git a/etcd/etcdserver/api/v2discovery/discovery.go b/etcd/etcdserver/api/v2discovery/discovery.go
new file mode 100644
index 00000000000..7820e4ed3ea
--- /dev/null
+++ b/etcd/etcdserver/api/v2discovery/discovery.go
@@ -0,0 +1,411 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2discovery provides an implementation of the cluster discovery that
+// is used by etcd with the v2 client.
+package v2discovery
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ clientv2 "github.com/ls-2018/etcd_cn/client_sdk/v2"
+
+ "github.com/jonboulle/clockwork"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "go.uber.org/zap"
+)
+
+var (
+ ErrInvalidURL = errors.New("discovery: invalid URL")
+ ErrBadSizeKey = errors.New("discovery: size key is bad")
+ ErrSizeNotFound = errors.New("discovery: size key not found")
+ ErrTokenNotFound = errors.New("discovery: token not found")
+ ErrDuplicateID = errors.New("discovery: found duplicate id")
+ ErrDuplicateName = errors.New("discovery: found duplicate name")
+ ErrFullCluster = errors.New("discovery: cluster is full")
+ ErrTooManyRetries = errors.New("discovery: too many retries")
+ ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint")
+)
+
+var (
+ // Number of retries discovery will attempt before giving up and erroring out.
+ nRetries = uint(math.MaxUint32)
+ maxExpoentialRetries = uint(8)
+)
+
+// JoinCluster connects to the discovery service at the given url and registers the server represented by the given id and config with the cluster.
+func JoinCluster(lg *zap.Logger, durl, dproxyurl string, id types.ID, config string) (string, error) {
+ d, err := newDiscovery(lg, durl, dproxyurl, id)
+ if err != nil {
+ return "", err
+ }
+ return d.joinCluster(config)
+}
+
+// GetCluster will connect to the discovery service at the given url and
+// retrieve a string describing the cluster
+func GetCluster(lg *zap.Logger, durl, dproxyurl string) (string, error) {
+ d, err := newDiscovery(lg, durl, dproxyurl, 0)
+ if err != nil {
+ return "", err
+ }
+ return d.getCluster()
+}
+
+type discovery struct {
+ lg *zap.Logger
+ cluster string
+ id types.ID
+ c clientv2.KeysAPI
+ retries uint
+ url *url.URL
+
+ clock clockwork.Clock
+}
+
+// newProxyFunc builds a proxy function from the given string, which should
+// represent a URL that can be used as a proxy. It performs basic
+// sanitization of the URL and returns any error encountered.
+func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, error), error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if proxy == "" {
+ return nil, nil
+ }
+ // Do a small amount of URL sanitization to help the user
+ // Derived from net/http.ProxyFromEnvironment
+ proxyURL, err := url.Parse(proxy)
+ if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
+ // proxy was bogus. Try prepending "http://" to it and
+ // see if that parses correctly. If not, we ignore the
+ // error and complain about the original one
+ var err2 error
+ proxyURL, err2 = url.Parse("http://" + proxy)
+ if err2 == nil {
+ err = nil
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
+ }
+
+ lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String()))
+ return http.ProxyURL(proxyURL), nil
+}
+
+func newDiscovery(lg *zap.Logger, durl, dproxyurl string, id types.ID) (*discovery, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ u, err := url.Parse(durl)
+ if err != nil {
+ return nil, err
+ }
+ token := u.Path
+ u.Path = ""
+ pf, err := newProxyFunc(lg, dproxyurl)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
+ tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
+ if err != nil {
+ return nil, err
+ }
+ tr.Proxy = pf
+ cfg := clientv2.Config{
+ Transport: tr,
+ Endpoints: []string{u.String()},
+ }
+ c, err := clientv2.New(cfg)
+ if err != nil {
+ return nil, err
+ }
+ dc := clientv2.NewKeysAPIWithPrefix(c, "")
+ return &discovery{
+ lg: lg,
+ cluster: token,
+ c: dc,
+ id: id,
+ url: u,
+ clock: clockwork.NewRealClock(),
+ }, nil
+}
+
+func (d *discovery) joinCluster(config string) (string, error) {
+ // fast path: if the cluster is full, return the error
+ // do not need to register to the cluster in this case.
+ if _, _, _, err := d.checkCluster(); err != nil {
+ return "", err
+ }
+
+ if err := d.createSelf(config); err != nil {
+ // Fails, even on a timeout, if createSelf times out.
+ // TODO(barakmich): Retrying the same node might want to succeed here
+ // (ie, createSelf should be idempotent for discovery).
+ return "", err
+ }
+
+ nodes, size, index, err := d.checkCluster()
+ if err != nil {
+ return "", err
+ }
+
+ all, err := d.waitNodes(nodes, size, index)
+ if err != nil {
+ return "", err
+ }
+
+ return nodesToCluster(all, size)
+}
+
+func (d *discovery) getCluster() (string, error) {
+ nodes, size, index, err := d.checkCluster()
+ if err != nil {
+ if err == ErrFullCluster {
+ return nodesToCluster(nodes, size)
+ }
+ return "", err
+ }
+
+ all, err := d.waitNodes(nodes, size, index)
+ if err != nil {
+ return "", err
+ }
+ return nodesToCluster(all, size)
+}
+
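+// createSelf registers this member under its own discovery key and then watches that key to make sure the write is visible on the etcd it is connected to.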
+func (d *discovery) createSelf(contents string) error {
+ ctx, cancel := context.WithTimeout(context.Background(), clientv2.DefaultRequestTimeout)
+ resp, err := d.c.Create(ctx, d.selfKey(), contents)
+ cancel()
+ if err != nil {
+ if eerr, ok := err.(clientv2.Error); ok && eerr.Code == clientv2.ErrorCodeNodeExist {
+ return ErrDuplicateID
+ }
+ return err
+ }
+
+ // ensure self appears on the etcd we connected to
+ w := d.c.Watcher(d.selfKey(), &clientv2.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
+ _, err = w.Next(context.Background())
+ return err
+}
+
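+// checkCluster fetches the expected cluster size and the currently registered members, returning ErrFullCluster if this member does not fit within the size limit.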
+func (d *discovery) checkCluster() ([]*clientv2.Node, uint64, uint64, error) {
+ configKey := path.Join("/", d.cluster, "_config")
+ ctx, cancel := context.WithTimeout(context.Background(), clientv2.DefaultRequestTimeout)
+ // find cluster size
+ resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
+ cancel()
+ if err != nil {
+ if eerr, ok := err.(*clientv2.Error); ok && eerr.Code == clientv2.ErrorCodeKeyNotFound {
+ return nil, 0, 0, ErrSizeNotFound
+ }
+ if err == clientv2.ErrInvalidJSON {
+ return nil, 0, 0, ErrBadDiscoveryEndpoint
+ }
+ if ce, ok := err.(*clientv2.ClusterError); ok {
+ d.lg.Warn(
+ "failed to get from discovery etcd",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("path", path.Join(configKey, "size")),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ return d.checkClusterRetry()
+ }
+ return nil, 0, 0, err
+ }
+ size, err := strconv.ParseUint(resp.Node.Value, 10, 0)
+ if err != nil {
+ return nil, 0, 0, ErrBadSizeKey
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), clientv2.DefaultRequestTimeout)
+ resp, err = d.c.Get(ctx, d.cluster, nil)
+ cancel()
+ if err != nil {
+ if ce, ok := err.(*clientv2.ClusterError); ok {
+ d.lg.Warn(
+ "failed to get from discovery etcd",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("path", d.cluster),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ return d.checkClusterRetry()
+ }
+ return nil, 0, 0, err
+ }
+ var nodes []*clientv2.Node
+ // append non-config keys to nodes
+ for _, n := range resp.Node.Nodes {
+ if path.Base(n.Key) != path.Base(configKey) {
+ nodes = append(nodes, n)
+ }
+ }
+
+ snodes := sortableNodes{nodes}
+ sort.Sort(snodes)
+
+ // find self position
+ for i := range nodes {
+ if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
+ break
+ }
+ if uint64(i) >= size-1 {
+ return nodes[:size], size, resp.Index, ErrFullCluster
+ }
+ }
+ return nodes, size, resp.Index, nil
+}
+
+func (d *discovery) logAndBackoffForRetry(step string) {
+ d.retries++
+ // logAndBackoffForRetry stops exponential backoff when the retries are more than maxExpoentialRetries and is set to a constant backoff afterward.
+ retries := d.retries
+ if retries > maxExpoentialRetries {
+ retries = maxExpoentialRetries
+ }
+ retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+ d.lg.Info(
+ "retry connecting to discovery service",
+ zap.String("reason", step),
+ zap.Duration("backoff", retryTimeInSecond),
+ )
+ d.clock.Sleep(retryTimeInSecond)
+}
+
+func (d *discovery) checkClusterRetry() ([]*clientv2.Node, uint64, uint64, error) {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("cluster status check")
+ return d.checkCluster()
+ }
+ return nil, 0, 0, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodesRetry() ([]*clientv2.Node, error) {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("waiting for other nodes")
+ nodes, n, index, err := d.checkCluster()
+ if err != nil {
+ return nil, err
+ }
+ return d.waitNodes(nodes, n, index)
+ }
+ return nil, ErrTooManyRetries
+}
+
+// waitNodes waits for peers to register until the expected cluster size is reached.
+func (d *discovery) waitNodes(nodes []*clientv2.Node, size uint64, index uint64) ([]*clientv2.Node, error) {
+ if uint64(len(nodes)) > size {
+ nodes = nodes[:size]
+ }
+ // watch from the next index
+ w := d.c.Watcher(d.cluster, &clientv2.WatcherOptions{AfterIndex: index, Recursive: true})
+ all := make([]*clientv2.Node, len(nodes))
+ copy(all, nodes)
+ for _, n := range all {
+ if path.Base(n.Key) == path.Base(d.selfKey()) {
+ d.lg.Info(
+ "found self from discovery etcd",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("self", path.Base(d.selfKey())),
+ )
+ } else {
+ d.lg.Info(
+ "found peer from discovery etcd",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("peer", path.Base(n.Key)),
+ )
+ }
+ }
+
+ // wait for others
+ for uint64(len(all)) < size {
+ d.lg.Info(
+ "found peers from discovery etcd; waiting for more",
+ zap.String("discovery-url", d.url.String()),
+ zap.Int("found-peers", len(all)),
+ zap.Int("needed-peers", int(size-uint64(len(all)))),
+ )
+ resp, err := w.Next(context.Background())
+ if err != nil {
+ if ce, ok := err.(*clientv2.ClusterError); ok {
+ d.lg.Warn(
+ "error while waiting for peers",
+ zap.String("discovery-url", d.url.String()),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ return d.waitNodesRetry()
+ }
+ return nil, err
+ }
+ d.lg.Info(
+ "found peer from discovery etcd",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("peer", path.Base(resp.Node.Key)),
+ )
+ all = append(all, resp.Node)
+ }
+ d.lg.Info(
+ "found all needed peers from discovery etcd",
+ zap.String("discovery-url", d.url.String()),
+ zap.Int("found-peers", len(all)),
+ )
+ return all, nil
+}
+
+func (d *discovery) selfKey() string {
+ return path.Join("/", d.cluster, d.id.String())
+}
+
+func nodesToCluster(ns []*clientv2.Node, size uint64) (string, error) {
+ s := make([]string, len(ns))
+ for i, n := range ns {
+ s[i] = n.Value
+ }
+ us := strings.Join(s, ",")
+ m, err := types.NewURLsMap(us)
+ if err != nil {
+ return us, ErrInvalidURL
+ }
+ if uint64(m.Len()) != size {
+ return us, ErrDuplicateName
+ }
+ return us, nil
+}
+
+type sortableNodes struct{ Nodes []*clientv2.Node }
+
+func (ns sortableNodes) Len() int { return len(ns.Nodes) }
+func (ns sortableNodes) Less(i, j int) bool {
+ return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
+}
+func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
diff --git a/etcd/etcdserver/api/v2error/error.go b/etcd/etcdserver/api/v2error/error.go
new file mode 100644
index 00000000000..17fc3e2688e
--- /dev/null
+++ b/etcd/etcdserver/api/v2error/error.go
@@ -0,0 +1,165 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2error describes errors in the etcd project. When any change happens,
+// https://github.com/etcd-io/website/blob/main/content/docs/v2/errorcode.md
+// needs to be updated correspondingly.
+// To be deprecated in favor of v3 APIs.
+package v2error
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+var errors = map[int]string{
+ // command related errors
+ EcodeKeyNotFound: "key没有找到",
+ EcodeTestFailed: "Compare failed", // test and set
+ EcodeNotFile: "Not a file",
+ ecodeNoMorePeer: "Reached the max number of peers in the cluster",
+ EcodeNotDir: "Not a directory",
+ EcodeNodeExist: "Key already exists", // create
+ ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd",
+ EcodeRootROnly: "Root is read only",
+ EcodeDirNotEmpty: "Directory not empty",
+ ecodeExistingPeerAddr: "Peer address has existed",
+ EcodeUnauthorized: "The request requires user authentication",
+
+ // Post form related errors
+ ecodeValueRequired: "Value is Required in POST form",
+ EcodePrevValueRequired: "PrevValue is Required in POST form",
+ EcodeTTLNaN: "The given TTL in POST form is not a number",
+ EcodeIndexNaN: "The given index in POST form is not a number",
+ ecodeValueOrTTLRequired: "Value or TTL is required in POST form",
+ ecodeTimeoutNaN: "The given timeout in POST form is not a number",
+ ecodeNameRequired: "Name is required in POST form",
+ ecodeIndexOrValueRequired: "Index or value is required",
+ ecodeIndexValueMutex: "Index and value cannot both be specified",
+ EcodeInvalidField: "Invalid field",
+ EcodeInvalidForm: "Invalid POST form",
+ EcodeRefreshValue: "Value provided on refresh",
+ EcodeRefreshTTLRequired: "A TTL必须是provided on refresh",
+
+ // raft related errors
+ EcodeRaftInternal: "Raft Internal Error",
+ EcodeLeaderElect: "During Leader Election",
+
+ // etcd related errors
+ EcodeWatcherCleared: "watcher is cleared due to etcd recovery",
+ EcodeEventIndexCleared: "The event in requested index is outdated and cleared",
+ ecodeStandbyInternal: "Standby Internal Error",
+ ecodeInvalidActiveSize: "Invalid active size",
+ ecodeInvalidRemoveDelay: "Standby remove delay",
+
+ // client related errors
+ ecodeClientInternal: "Client Internal Error",
+}
+
+var errorStatus = map[int]int{
+ EcodeKeyNotFound: http.StatusNotFound,
+ EcodeNotFile: http.StatusForbidden,
+ EcodeDirNotEmpty: http.StatusForbidden,
+ EcodeUnauthorized: http.StatusUnauthorized,
+ EcodeTestFailed: http.StatusPreconditionFailed,
+ EcodeNodeExist: http.StatusPreconditionFailed,
+ EcodeRaftInternal: http.StatusInternalServerError,
+ EcodeLeaderElect: http.StatusInternalServerError,
+}
+
+const (
+ EcodeKeyNotFound = 100
+ EcodeTestFailed = 101
+ EcodeNotFile = 102
+ ecodeNoMorePeer = 103
+ EcodeNotDir = 104
+ EcodeNodeExist = 105
+ ecodeKeyIsPreserved = 106
+ EcodeRootROnly = 107
+ EcodeDirNotEmpty = 108
+ ecodeExistingPeerAddr = 109
+ EcodeUnauthorized = 110
+
+ ecodeValueRequired = 200
+ EcodePrevValueRequired = 201
+ EcodeTTLNaN = 202
+ EcodeIndexNaN = 203
+ ecodeValueOrTTLRequired = 204
+ ecodeTimeoutNaN = 205
+ ecodeNameRequired = 206
+ ecodeIndexOrValueRequired = 207
+ ecodeIndexValueMutex = 208
+ EcodeInvalidField = 209
+ EcodeInvalidForm = 210
+ EcodeRefreshValue = 211
+ EcodeRefreshTTLRequired = 212
+
+ EcodeRaftInternal = 300
+ EcodeLeaderElect = 301
+
+ EcodeWatcherCleared = 400
+ EcodeEventIndexCleared = 401
+ ecodeStandbyInternal = 402
+ ecodeInvalidActiveSize = 403
+ ecodeInvalidRemoveDelay = 404
+
+ ecodeClientInternal = 500
+)
+
+type Error struct {
+ ErrorCode int `json:"errorCode"`
+ Message string `json:"message"`
+ Cause string `json:"cause,omitempty"`
+ Index uint64 `json:"index"`
+}
+
+func NewRequestError(errorCode int, cause string) *Error {
+ return NewError(errorCode, cause, 0)
+}
+
+func NewError(errorCode int, cause string, index uint64) *Error {
+ return &Error{
+ ErrorCode: errorCode,
+ Message: errors[errorCode],
+ Cause: cause,
+ Index: index,
+ }
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+ return e.Message + " (" + e.Cause + ")"
+}
+
+func (e Error) toJsonString() string {
+ b, _ := json.Marshal(e)
+ return string(b)
+}
+
+func (e Error) StatusCode() int {
+ status, ok := errorStatus[e.ErrorCode]
+ if !ok {
+ status = http.StatusBadRequest
+ }
+ return status
+}
+
+func (e Error) WriteTo(w http.ResponseWriter) error {
+ w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(e.StatusCode())
+ _, err := w.Write([]byte(e.toJsonString() + "\n"))
+ return err
+}
diff --git a/etcd/etcdserver/api/v2http/capability.go b/etcd/etcdserver/api/v2http/capability.go
new file mode 100644
index 00000000000..606c6180337
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/capability.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+)
+
+func authCapabilityHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if !api.IsCapabilityEnabled(api.AuthCapability) {
+ notCapable(w, r, api.AuthCapability)
+ return
+ }
+ fn(w, r)
+ }
+}
+
+func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
+ herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
+ if err := herr.WriteTo(w); err != nil {
+ // TODO: the following plog was removed, add the logging back if possible
+ // plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
+ }
+}
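// Illustrative sketch: the same "capability gate" decorator pattern that
// authCapabilityHandler implements, written against a plain feature flag so it
// runs standalone. All names and the listen address here are invented for the
// example and are not part of the package above.
package main

import (
	"fmt"
	"net/http"
)

var authEnabled = false // stand-in for api.IsCapabilityEnabled(api.AuthCapability)

func requireAuthCapability(fn http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if !authEnabled {
			// Mirrors notCapable: reject the request while the capability is off.
			http.Error(w, "Not capable of accessing auth feature during rolling upgrades.", http.StatusInternalServerError)
			return
		}
		fn(w, r)
	}
}

func main() {
	http.HandleFunc("/v2/auth/roles", requireAuthCapability(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, `{"roles":[]}`)
	}))
	_ = http.ListenAndServe("127.0.0.1:8080", nil) // hypothetical listen address
}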
diff --git a/etcd/etcdserver/api/v2http/client.go b/etcd/etcdserver/api/v2http/client.go
new file mode 100644
index 00000000000..cab41129446
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/client.go
@@ -0,0 +1,757 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+)
+
+const (
+ authPrefix = "/v2/auth"
+ keysPrefix = "/v2/keys"
+ machinesPrefix = "/v2/machines"
+ membersPrefix = "/v2/members"
+ statsPrefix = "/v2/stats"
+)
+
+// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
+func NewClientHandler(lg *zap.Logger, server etcdserver.ServerPeer, timeout time.Duration) http.Handler {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ mux := http.NewServeMux()
+ etcdhttp.HandleBasic(lg, mux, server)
+ etcdhttp.HandleMetricsHealth(lg, mux, server)
+ handleV2(lg, mux, server, timeout)
+ return requestLogger(lg, mux)
+}
+
+func handleV2(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) {
+ sec := v2auth.NewStore(lg, server, timeout)
+ kh := &keysHandler{
+ lg: lg,
+ sec: sec,
+ server: server,
+ cluster: server.Cluster(),
+ timeout: timeout,
+ clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+ }
+
+ sh := &statsHandler{
+ lg: lg,
+ stats: server,
+ }
+
+ mh := &membersHandler{
+ lg: lg,
+ sec: sec,
+ server: server,
+ cluster: server.Cluster(),
+ timeout: timeout,
+ clock: clockwork.NewRealClock(),
+ clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+ }
+
+ mah := &machinesHandler{cluster: server.Cluster()}
+
+ sech := &authHandler{
+ lg: lg,
+ sec: sec,
+ cluster: server.Cluster(),
+ clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+ }
+ mux.HandleFunc("/", http.NotFound)
+ mux.Handle(keysPrefix, kh)
+ mux.Handle(keysPrefix+"/", kh)
+ mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
+ mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
+ mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
+ mux.Handle(membersPrefix, mh)
+ mux.Handle(membersPrefix+"/", mh)
+ mux.Handle(machinesPrefix, mah)
+ handleAuth(mux, sech)
+}
+
+type keysHandler struct {
+ lg *zap.Logger
+ sec v2auth.Store
+ server etcdserver.ServerV2
+ cluster api.Cluster
+ timeout time.Duration
+ clientCertAuthEnabled bool
+}
+
+func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
+ defer cancel()
+ clock := clockwork.NewRealClock()
+ startTime := clock.Now()
+ rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
+ if err != nil {
+ writeKeyError(h.lg, w, err)
+ return
+ }
+ // The path must be valid at this point (we've parsed the request successfully).
+ if !hasKeyPrefixAccess(h.lg, h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) {
+ writeKeyNoAuth(w)
+ return
+ }
+ if !rr.Wait {
+ reportRequestReceived(rr)
+ }
+ resp, err := h.server.Do(ctx, rr)
+ if err != nil {
+ err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
+ writeKeyError(h.lg, w, err)
+ reportRequestFailed(rr, err)
+ return
+ }
+ switch {
+ case resp.Event != nil:
+ if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil {
+ // Should never be reached
+ h.lg.Warn("failed to write key event", zap.Error(err))
+ }
+ reportRequestCompleted(rr, startTime)
+ case resp.Watcher != nil:
+ ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
+ defer cancel()
+ handleKeyWatch(ctx, h.lg, w, resp, rr.Stream)
+ default:
+ writeKeyError(h.lg, w, errors.New("received response with no Event/Watcher"))
+ }
+}
+
+type machinesHandler struct {
+ cluster api.Cluster
+}
+
+func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET", "HEAD") {
+ return
+ }
+ endpoints := h.cluster.ClientURLs()
+ w.Write([]byte(strings.Join(endpoints, ", ")))
+}
+
+type membersHandler struct {
+ lg *zap.Logger
+ sec v2auth.Store
+ server etcdserver.ServerV2
+ cluster api.Cluster
+ timeout time.Duration
+ clock clockwork.Clock
+ clientCertAuthEnabled bool
+}
+
+func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") {
+ return
+ }
+ if !hasWriteRootAccess(h.lg, h.sec, r, h.clientCertAuthEnabled) {
+ writeNoAuth(h.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
+ defer cancel()
+
+ switch r.Method {
+ case "GET":
+ switch trimPrefix(r.URL.Path, membersPrefix) {
+ case "":
+ mc := newMemberCollection(h.cluster.Members())
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(mc); err != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ }
+ case "leader":
+ id := h.server.Leader()
+ if id == 0 {
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election"))
+ return
+ }
+ m := newMember(h.cluster.Member(id))
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(m); err != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ }
+ default:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found"))
+ }
+
+ case "POST":
+ req := httptypes.MemberCreateRequest{}
+ if ok := unmarshalRequest(h.lg, r, &req, w); !ok {
+ return
+ }
+ now := h.clock.Now()
+ m := membership.NewMember("", req.PeerURLs, "", &now)
+ _, err := h.server.AddMember(ctx, *m)
+ switch {
+ case err == membership.ErrIDExists || err == membership.ErrPeerURLexists:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
+ return
+ case err != nil:
+ h.lg.Warn(
+ "failed to add a member",
+ zap.String("member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ writeError(h.lg, w, r, err)
+ return
+ }
+ res := newMember(m)
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusCreated)
+ if err := json.NewEncoder(w).Encode(res); err != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ }
+
+ case "DELETE":
+ id, ok := getID(h.lg, r.URL.Path, w)
+ if !ok {
+ return
+ }
+ _, err := h.server.RemoveMember(ctx, uint64(id))
+ switch {
+ case err == membership.ErrIDRemoved:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
+ case err == membership.ErrIDNotFound:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
+ case err != nil:
+ h.lg.Warn(
+ "failed to remove a member",
+ zap.String("member-id", id.String()),
+ zap.Error(err),
+ )
+ writeError(h.lg, w, r, err)
+ default:
+ w.WriteHeader(http.StatusNoContent)
+ }
+
+ case "PUT":
+ id, ok := getID(h.lg, r.URL.Path, w)
+ if !ok {
+ return
+ }
+ req := httptypes.MemberUpdateRequest{}
+ if ok := unmarshalRequest(h.lg, r, &req, w); !ok {
+ return
+ }
+ m := membership.Member{
+ ID: id,
+ RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()},
+ }
+ _, err := h.server.UpdateMember(ctx, m)
+ switch {
+ case err == membership.ErrPeerURLexists:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
+ case err == membership.ErrIDNotFound:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
+ case err != nil:
+ h.lg.Warn(
+ "failed to update a member",
+ zap.String("member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ writeError(h.lg, w, r, err)
+ default:
+ w.WriteHeader(http.StatusNoContent)
+ }
+ }
+}
+
+type statsHandler struct {
+ lg *zap.Logger
+ stats stats.Stats
+}
+
+func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(h.stats.StoreStats())
+}
+
+func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(h.stats.SelfStats())
+}
+
+func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ stats := h.stats.LeaderStats()
+ if stats == nil {
+ etcdhttp.WriteError(h.lg, w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(stats)
+}
+
+// parseKeyRequest converts a received http.Request on keysPrefix to
+// a server Request, performing validation of supplied fields as appropriate.
+// If any validation fails, an empty Request and non-nil error is returned.
+func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
+ var noValueOnSuccess bool
+ emptyReq := etcdserverpb.Request{}
+
+ err := r.ParseForm()
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidForm,
+ err.Error(),
+ )
+ }
+
+ if !strings.HasPrefix(r.URL.Path, keysPrefix) {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidForm,
+ "incorrect key prefix",
+ )
+ }
+ p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):])
+
+ var pIdx, wIdx uint64
+ if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeIndexNaN,
+ `invalid value for "prevIndex"`,
+ )
+ }
+ if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeIndexNaN,
+ `invalid value for "waitIndex"`,
+ )
+ }
+
+ var rec, sort, wait, dir, quorum, stream bool
+ if rec, err = getBool(r.Form, "recursive"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "recursive"`,
+ )
+ }
+ if sort, err = getBool(r.Form, "sorted"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "sorted"`,
+ )
+ }
+ if wait, err = getBool(r.Form, "wait"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "wait"`,
+ )
+ }
+ // TODO(jonboulle): define what parameters dir is/isn't compatible with?
+ if dir, err = getBool(r.Form, "dir"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "dir"`,
+ )
+ }
+ if quorum, err = getBool(r.Form, "quorum"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "quorum"`,
+ )
+ }
+ if stream, err = getBool(r.Form, "stream"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "stream"`,
+ )
+ }
+
+ if wait && r.Method != "GET" {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `"wait" can only be used with GET requests`,
+ )
+ }
+
+ pV := r.FormValue("prevValue")
+ if _, ok := r.Form["prevValue"]; ok && pV == "" {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodePrevValueRequired,
+ `"prevValue" cannot be empty`,
+ )
+ }
+
+ if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "noValueOnSuccess"`,
+ )
+ }
+
+ // TTL is nullable, so leave it null if not specified
+ // or an empty string
+ var ttl *uint64
+ if len(r.FormValue("ttl")) > 0 {
+ i, err := getUint64(r.Form, "ttl")
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeTTLNaN,
+ `invalid value for "ttl"`,
+ )
+ }
+ ttl = &i
+ }
+
+ // prevExist is nullable, so leave it null if not specified
+ var pe *bool
+ if _, ok := r.Form["prevExist"]; ok {
+ bv, err := getBool(r.Form, "prevExist")
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ "invalid value for prevExist",
+ )
+ }
+ pe = &bv
+ }
+
+ // refresh is nullable, so leave it null if not specified
+ var refresh *bool
+ if _, ok := r.Form["refresh"]; ok {
+ bv, err := getBool(r.Form, "refresh")
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ "invalid value for refresh",
+ )
+ }
+ refresh = &bv
+ if refresh != nil && *refresh {
+ val := r.FormValue("value")
+ if _, ok := r.Form["value"]; ok && val != "" {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeRefreshValue,
+ `A value was provided on a refresh`,
+ )
+ }
+ if ttl == nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeRefreshTTLRequired,
+ `No TTL value set`,
+ )
+ }
+ }
+ }
+
+ rr := etcdserverpb.Request{
+ Method: r.Method,
+ Path: p,
+ Val: r.FormValue("value"),
+ Dir: dir,
+ PrevValue: pV,
+ PrevIndex: pIdx,
+ PrevExist: pe,
+ Wait: wait,
+ Since: wIdx,
+ Recursive: rec,
+ Sorted: sort,
+ Quorum: quorum,
+ Stream: stream,
+ }
+
+ if pe != nil {
+ rr.PrevExist = pe
+ }
+
+ if refresh != nil {
+ rr.Refresh = refresh
+ }
+
+ // Null TTL is equivalent to unset Expiration
+ if ttl != nil {
+ expr := time.Duration(*ttl) * time.Second
+ rr.Expiration = clock.Now().Add(expr).UnixNano()
+ }
+
+ return rr, noValueOnSuccess, nil
+}
+
+// writeKeyEvent trims the prefix of key path in a single Event under
+// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
+// ResponseWriter, along with the appropriate headers.
+func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error {
+ ev := resp.Event
+ if ev == nil {
+ return errors.New("cannot write empty Event")
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex))
+ w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
+ w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
+
+ if ev.IsCreated() {
+ w.WriteHeader(http.StatusCreated)
+ }
+
+ ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
+ if noValueOnSuccess &&
+ (ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap ||
+ ev.Action == v2store.Create || ev.Action == v2store.Update) {
+ ev.NodeExtern = nil
+ ev.PrevNode = nil
+ }
+ return json.NewEncoder(w).Encode(ev)
+}
+
+func writeKeyNoAuth(w http.ResponseWriter) {
+ e := v2error.NewError(v2error.EcodeUnauthorized, "Insufficient credentials", 0)
+ e.WriteTo(w)
+}
+
+// writeKeyError logs and writes the given Error to the ResponseWriter.
+// If Error is not an etcdErr, the error will be converted to an etcd error.
+func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) {
+ if err == nil {
+ return
+ }
+ switch e := err.(type) {
+ case *v2error.Error:
+ e.WriteTo(w)
+ default:
+ switch err {
+ case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
+ if lg != nil {
+ lg.Warn(
+ "v2 response error",
+ zap.String("internal-server-error", err.Error()),
+ )
+ }
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unexpected v2 response error",
+ zap.String("internal-server-error", err.Error()),
+ )
+ }
+ }
+ ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0)
+ ee.WriteTo(w)
+ }
+}
+
+func handleKeyWatch(ctx context.Context, lg *zap.Logger, w http.ResponseWriter, resp etcdserver.Response, stream bool) {
+ wa := resp.Watcher
+ defer wa.Remove()
+ ech := wa.EventChan()
+ var nch <-chan bool
+ if x, ok := w.(http.CloseNotifier); ok {
+ nch = x.CloseNotify()
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
+ w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
+ w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
+ w.WriteHeader(http.StatusOK)
+
+ // Ensure headers are flushed early, in case of long polling
+ w.(http.Flusher).Flush()
+
+ for {
+ select {
+ case <-nch:
+ // Client closed connection. Nothing to do.
+ return
+ case <-ctx.Done():
+ // Timed out. net/http will close the connection for us, so nothing to do.
+ return
+ case ev, ok := <-ech:
+ if !ok {
+				// If the channel is closed, it may indicate that notifications are
+				// arriving faster than we can send them to the client; in that case
+				// we simply end streaming.
+ return
+ }
+ ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
+ if err := json.NewEncoder(w).Encode(ev); err != nil {
+ // Should never be reached
+ lg.Warn("failed to encode event", zap.Error(err))
+ return
+ }
+ if !stream {
+ return
+ }
+ w.(http.Flusher).Flush()
+ }
+ }
+}
+
+func trimEventPrefix(ev *v2store.Event, prefix string) *v2store.Event {
+ if ev == nil {
+ return nil
+ }
+	// Since the *Event may reference an event in the store history,
+	// we must copy it before modifying.
+ e := ev.Clone()
+ trimNodeExternPrefix(e.NodeExtern, prefix)
+ trimNodeExternPrefix(e.PrevNode, prefix)
+ return e
+}
+
+func trimNodeExternPrefix(n *v2store.NodeExtern, prefix string) {
+ if n == nil {
+ return
+ }
+ n.Key = strings.TrimPrefix(n.Key, prefix)
+ for _, nn := range n.ExternNodes {
+ trimNodeExternPrefix(nn, prefix)
+ }
+}
+
+func trimErrorPrefix(err error, prefix string) error {
+ if e, ok := err.(*v2error.Error); ok {
+ e.Cause = strings.TrimPrefix(e.Cause, prefix)
+ }
+ return err
+}
+
+func unmarshalRequest(lg *zap.Logger, r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
+ ctype := r.Header.Get("Content-Type")
+ semicolonPosition := strings.Index(ctype, ";")
+ if semicolonPosition != -1 {
+ ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
+ }
+ if ctype != "application/json" {
+ writeError(lg, w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
+ return false
+ }
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
+ return false
+ }
+ if err := req.UnmarshalJSON(b); err != nil {
+ writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
+ return false
+ }
+ return true
+}
+
+func getID(lg *zap.Logger, p string, w http.ResponseWriter) (types.ID, bool) {
+ idStr := trimPrefix(p, membersPrefix)
+ if idStr == "" {
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return 0, false
+ }
+ id, err := types.IDFromString(idStr)
+ if err != nil {
+ writeError(lg, w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
+ return 0, false
+ }
+ return id, true
+}
+
+// getUint64 extracts a uint64 by the given key from a Form. If the key does
+// not exist in the form, 0 is returned. If the key exists but the value is
+// badly formed, an error is returned. If multiple values are present only the
+// first is considered.
+func getUint64(form url.Values, key string) (i uint64, err error) {
+ if vals, ok := form[key]; ok {
+ i, err = strconv.ParseUint(vals[0], 10, 64)
+ }
+ return
+}
+
+// getBool extracts a bool by the given key from a Form. If the key does not
+// exist in the form, false is returned. If the key exists but the value is
+// badly formed, an error is returned. If multiple values are present only the
+// first is considered.
+func getBool(form url.Values, key string) (b bool, err error) {
+ if vals, ok := form[key]; ok {
+ b, err = strconv.ParseBool(vals[0])
+ }
+ return
+}
+
+// trimPrefix removes a given prefix and any slash following the prefix
+// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == ""
+func trimPrefix(p, prefix string) (s string) {
+ s = strings.TrimPrefix(p, prefix)
+ s = strings.TrimPrefix(s, "/")
+ return
+}
+
+func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection {
+ c := httptypes.MemberCollection(make([]httptypes.Member, len(ms)))
+
+ for i, m := range ms {
+ c[i] = newMember(m)
+ }
+
+ return &c
+}
+
+func newMember(m *membership.Member) httptypes.Member {
+ tm := httptypes.Member{
+ ID: m.ID.String(),
+ Name: m.Name,
+ PeerURLs: make([]string, len(m.PeerURLs)),
+ ClientURLs: make([]string, len(m.ClientURLs)),
+ }
+
+ copy(tm.PeerURLs, m.PeerURLs)
+ copy(tm.ClientURLs, m.ClientURLs)
+
+ return tm
+}
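// Illustrative sketch: the client side of the form parameters that
// parseKeyRequest accepts, setting a key with a TTL against the v2 keys
// endpoint served by keysHandler. The endpoint address is an assumption about
// a locally running server.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	form := url.Values{}
	form.Set("value", "bar")
	form.Set("ttl", "30") // parsed by getUint64(r.Form, "ttl")

	req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:2379/v2/keys/foo", strings.NewReader(form.Encode()))
	if err != nil {
		panic(err)
	}
	// r.ParseForm only reads the body for form-encoded POST/PUT/PATCH requests.
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // JSON event written by writeKeyEvent
}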
diff --git a/etcd/etcdserver/api/v2http/client_auth.go b/etcd/etcdserver/api/v2http/client_auth.go
new file mode 100644
index 00000000000..15968a5a0b4
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/client_auth.go
@@ -0,0 +1,604 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "encoding/json"
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+
+ "go.uber.org/zap"
+)
+
+type authHandler struct {
+ lg *zap.Logger
+ sec v2auth.Store
+ cluster api.Cluster
+ clientCertAuthEnabled bool
+}
+
+func hasWriteRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
+ if r.Method == "GET" || r.Method == "HEAD" {
+ return true
+ }
+ return hasRootAccess(lg, sec, r, clientCertAuthEnabled)
+}
+
+func userFromBasicAuth(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User {
+ username, password, ok := r.BasicAuth()
+ if !ok {
+ lg.Warn("malformed basic auth encoding")
+ return nil
+ }
+ user, err := sec.GetUser(username)
+ if err != nil {
+ return nil
+ }
+
+ ok = sec.CheckPassword(user, password)
+ if !ok {
+ lg.Warn("incorrect password", zap.String("user-name", username))
+ return nil
+ }
+ return &user
+}
+
+func userFromClientCertificate(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User {
+ if r.TLS == nil {
+ return nil
+ }
+
+ for _, chains := range r.TLS.VerifiedChains {
+ for _, chain := range chains {
+ lg.Debug("found common name", zap.String("common-name", chain.Subject.CommonName))
+ user, err := sec.GetUser(chain.Subject.CommonName)
+ if err == nil {
+ lg.Debug(
+ "authenticated a user via common name",
+ zap.String("user-name", user.User),
+ zap.String("common-name", chain.Subject.CommonName),
+ )
+ return &user
+ }
+ }
+ }
+ return nil
+}
+
+func hasRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
+ if sec == nil {
+		// No store means no auth is available, e.g., in tests.
+ return true
+ }
+ if !sec.AuthEnabled() {
+ return true
+ }
+
+ var rootUser *v2auth.User
+ if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
+ rootUser = userFromClientCertificate(lg, sec, r)
+ if rootUser == nil {
+ return false
+ }
+ } else {
+ rootUser = userFromBasicAuth(lg, sec, r)
+ if rootUser == nil {
+ return false
+ }
+ }
+
+ for _, role := range rootUser.Roles {
+ if role == v2auth.RootRoleName {
+ return true
+ }
+ }
+
+ lg.Warn(
+ "a user does not have root role for resource",
+ zap.String("root-user", rootUser.User),
+ zap.String("root-role-name", v2auth.RootRoleName),
+ zap.String("resource-path", r.URL.Path),
+ )
+ return false
+}
+
+func hasKeyPrefixAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
+ if sec == nil {
+		// No store means no auth is available, e.g., in tests.
+ return true
+ }
+ if !sec.AuthEnabled() {
+ return true
+ }
+
+ var user *v2auth.User
+ if r.Header.Get("Authorization") == "" {
+ if clientCertAuthEnabled {
+ user = userFromClientCertificate(lg, sec, r)
+ }
+ if user == nil {
+ return hasGuestAccess(lg, sec, r, key)
+ }
+ } else {
+ user = userFromBasicAuth(lg, sec, r)
+ if user == nil {
+ return false
+ }
+ }
+
+ writeAccess := r.Method != "GET" && r.Method != "HEAD"
+ for _, roleName := range user.Roles {
+ role, err := sec.GetRole(roleName)
+ if err != nil {
+ continue
+ }
+ if recursive {
+ if role.HasRecursiveAccess(key, writeAccess) {
+ return true
+ }
+ } else if role.HasKeyAccess(key, writeAccess) {
+ return true
+ }
+ }
+
+ lg.Warn(
+ "invalid access for user on key",
+ zap.String("user-name", user.User),
+ zap.String("key", key),
+ )
+ return false
+}
+
+func hasGuestAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string) bool {
+ writeAccess := r.Method != "GET" && r.Method != "HEAD"
+ role, err := sec.GetRole(v2auth.GuestRoleName)
+ if err != nil {
+ return false
+ }
+ if role.HasKeyAccess(key, writeAccess) {
+ return true
+ }
+
+ lg.Warn(
+ "invalid access for a guest role on key",
+ zap.String("role-name", v2auth.GuestRoleName),
+ zap.String("key", key),
+ )
+ return false
+}
+
+func writeNoAuth(lg *zap.Logger, w http.ResponseWriter, r *http.Request) {
+ herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials")
+ if err := herr.WriteTo(w); err != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.Error(err),
+ )
+ }
+}
+
+func handleAuth(mux *http.ServeMux, sh *authHandler) {
+ mux.HandleFunc(authPrefix+"/roles", authCapabilityHandler(sh.baseRoles))
+ mux.HandleFunc(authPrefix+"/roles/", authCapabilityHandler(sh.handleRoles))
+ mux.HandleFunc(authPrefix+"/users", authCapabilityHandler(sh.baseUsers))
+ mux.HandleFunc(authPrefix+"/users/", authCapabilityHandler(sh.handleUsers))
+ mux.HandleFunc(authPrefix+"/enable", authCapabilityHandler(sh.enableDisable))
+}
+
+func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ roles, err := sh.sec.AllRoles()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ if roles == nil {
+ roles = make([]string, 0)
+ }
+
+ err = r.ParseForm()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ var rolesCollections struct {
+ Roles []v2auth.Role `json:"roles"`
+ }
+ for _, roleName := range roles {
+ var role v2auth.Role
+ role, err = sh.sec.GetRole(roleName)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ rolesCollections.Roles = append(rolesCollections.Roles, role)
+ }
+ err = json.NewEncoder(w).Encode(rolesCollections)
+
+ if err != nil {
+ sh.lg.Warn(
+ "failed to encode base roles",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ writeError(sh.lg, w, r, err)
+ return
+ }
+}
+
+func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) {
+ subpath := path.Clean(r.URL.Path[len(authPrefix):])
+ // Split "/roles/rolename/command".
+ // First item is an empty string, second is "roles"
+ pieces := strings.Split(subpath, "/")
+ if len(pieces) == 2 {
+ sh.baseRoles(w, r)
+ return
+ }
+ if len(pieces) != 3 {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
+ return
+ }
+ sh.forRole(w, r, pieces[2])
+}
+
+func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) {
+ if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ switch r.Method {
+ case "GET":
+ data, err := sh.sec.GetRole(role)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ err = json.NewEncoder(w).Encode(data)
+ if err != nil {
+ sh.lg.Warn(
+ "failed to encode a role",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ return
+ }
+ return
+
+ case "PUT":
+ var in v2auth.Role
+ err := json.NewDecoder(r.Body).Decode(&in)
+ if err != nil {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
+ return
+ }
+ if in.Role != role {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
+ return
+ }
+
+ var out v2auth.Role
+
+ // create
+ if in.Grant.IsEmpty() && in.Revoke.IsEmpty() {
+ err = sh.sec.CreateRole(in)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ w.WriteHeader(http.StatusCreated)
+ out = in
+ } else {
+ if !in.Permissions.IsEmpty() {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
+ return
+ }
+ out, err = sh.sec.UpdateRole(in)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ }
+
+ err = json.NewEncoder(w).Encode(out)
+ if err != nil {
+ sh.lg.Warn(
+ "failed to encode a role",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ return
+ }
+ return
+
+ case "DELETE":
+ err := sh.sec.DeleteRole(role)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+}
+
+type userWithRoles struct {
+ User string `json:"user"`
+ Roles []v2auth.Role `json:"roles,omitempty"`
+}
+
+type usersCollections struct {
+ Users []userWithRoles `json:"users"`
+}
+
+func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ users, err := sh.sec.AllUsers()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ if users == nil {
+ users = make([]string, 0)
+ }
+
+ err = r.ParseForm()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ ucs := usersCollections{}
+ for _, userName := range users {
+ var user v2auth.User
+ user, err = sh.sec.GetUser(userName)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ uwr := userWithRoles{User: user.User}
+ for _, roleName := range user.Roles {
+ var role v2auth.Role
+ role, err = sh.sec.GetRole(roleName)
+ if err != nil {
+ continue
+ }
+ uwr.Roles = append(uwr.Roles, role)
+ }
+
+ ucs.Users = append(ucs.Users, uwr)
+ }
+ err = json.NewEncoder(w).Encode(ucs)
+
+ if err != nil {
+ sh.lg.Warn(
+ "failed to encode users",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ writeError(sh.lg, w, r, err)
+ return
+ }
+}
+
+func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) {
+ subpath := path.Clean(r.URL.Path[len(authPrefix):])
+ // Split "/users/username".
+ // First item is an empty string, second is "users"
+ pieces := strings.Split(subpath, "/")
+ if len(pieces) == 2 {
+ sh.baseUsers(w, r)
+ return
+ }
+ if len(pieces) != 3 {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
+ return
+ }
+ sh.forUser(w, r, pieces[2])
+}
+
+func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) {
+ if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ switch r.Method {
+ case "GET":
+ u, err := sh.sec.GetUser(user)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ err = r.ParseForm()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ uwr := userWithRoles{User: u.User}
+ for _, roleName := range u.Roles {
+ var role v2auth.Role
+ role, err = sh.sec.GetRole(roleName)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ uwr.Roles = append(uwr.Roles, role)
+ }
+ err = json.NewEncoder(w).Encode(uwr)
+
+ if err != nil {
+ sh.lg.Warn(
+ "failed to encode roles",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ return
+ }
+ return
+
+ case "PUT":
+ var u v2auth.User
+ err := json.NewDecoder(r.Body).Decode(&u)
+ if err != nil {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
+ return
+ }
+ if u.User != user {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
+ return
+ }
+
+ var (
+ out v2auth.User
+ created bool
+ )
+
+ if len(u.Grant) == 0 && len(u.Revoke) == 0 {
+ // create or update
+ if len(u.Roles) != 0 {
+ out, err = sh.sec.CreateUser(u)
+ } else {
+				// When no roles are supplied we cannot tell whether the caller
+				// intends to create or to update, so defer to CreateOrUpdateUser.
+ out, created, err = sh.sec.CreateOrUpdateUser(u)
+ }
+
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ } else {
+ // update case
+ if len(u.Roles) != 0 {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
+ return
+ }
+ out, err = sh.sec.UpdateUser(u)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+
+ if created {
+ w.WriteHeader(http.StatusCreated)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+
+ out.Password = ""
+
+ err = json.NewEncoder(w).Encode(out)
+ if err != nil {
+ sh.lg.Warn(
+ "failed to encode a user",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ return
+ }
+ return
+
+ case "DELETE":
+ err := sh.sec.DeleteUser(user)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+}
+
+type enabled struct {
+ Enabled bool `json:"enabled"`
+}
+
+func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+ return
+ }
+ if !hasWriteRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+ isEnabled := sh.sec.AuthEnabled()
+ switch r.Method {
+ case "GET":
+ jsonDict := enabled{isEnabled}
+ err := json.NewEncoder(w).Encode(jsonDict)
+ if err != nil {
+ sh.lg.Warn(
+ "failed to encode a auth state",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ }
+
+ case "PUT":
+ err := sh.sec.EnableAuth()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ case "DELETE":
+ err := sh.sec.DisableAuth()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+}
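// Illustrative sketch: toggling v2 auth through the enableDisable handler
// above (PUT /v2/auth/enable turns auth on, DELETE turns it off). The endpoint
// address and root credentials are assumptions for the example.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:2379/v2/auth/enable", nil)
	if err != nil {
		panic(err)
	}
	// Basic auth is checked by hasRootAccess once auth is already enabled;
	// before that, the handler only requires that the request reaches it.
	req.SetBasicAuth("root", "rootpw") // assumed credentials

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("enable auth:", resp.Status)
}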
diff --git a/etcd/etcdserver/api/v2http/http.go b/etcd/etcdserver/api/v2http/http.go
new file mode 100644
index 00000000000..e1480afbff7
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/http.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "math"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+
+ "go.uber.org/zap"
+)
+
+const (
+ // time to wait for a Watch request
+ defaultWatchTimeout = time.Duration(math.MaxInt64)
+)
+
+func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
+ if err == nil {
+ return
+ }
+ if e, ok := err.(v2auth.Error); ok {
+ herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
+ if et := herr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("v2auth-error", e.Error()),
+ zap.Error(et),
+ )
+ }
+ }
+ return
+ }
+ etcdhttp.WriteError(lg, w, r, err)
+}
+
+// allowMethod verifies that the given method is one of the allowed methods,
+// and if not, it writes an error to w. A boolean is returned indicating
+// whether or not the method is allowed.
+func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
+ for _, meth := range ms {
+ if m == meth {
+ return true
+ }
+ }
+ w.Header().Set("Allow", strings.Join(ms, ","))
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return false
+}
+
+func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if lg != nil {
+ lg.Debug(
+ "handling HTTP request",
+ zap.String("method", r.Method),
+ zap.String("request-uri", r.RequestURI),
+ zap.String("remote-addr", r.RemoteAddr),
+ )
+ }
+ handler.ServeHTTP(w, r)
+ })
+}
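// Illustrative sketch: what a client observes when a method is rejected the
// way allowMethod rejects it - a 405 response carrying an Allow header that
// lists the permitted methods. The handler below mirrors allowMethod for a
// GET-only endpoint; the path is invented for the example.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		allowed := []string{"GET"}
		for _, m := range allowed {
			if r.Method == m {
				fmt.Fprintln(w, "ok")
				return
			}
		}
		w.Header().Set("Allow", strings.Join(allowed, ","))
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
	})

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodPost, "/v2/stats/self", nil))
	fmt.Println(rec.Code, rec.Header().Get("Allow")) // 405 GET
}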
diff --git a/server/etcdserver/api/etcdhttp/types/errors.go b/etcd/etcdserver/api/v2http/httptypes/errors.go
similarity index 100%
rename from server/etcdserver/api/etcdhttp/types/errors.go
rename to etcd/etcdserver/api/v2http/httptypes/errors.go
diff --git a/server/etcdserver/api/etcdhttp/types/errors_test.go b/etcd/etcdserver/api/v2http/httptypes/errors_test.go
similarity index 100%
rename from server/etcdserver/api/etcdhttp/types/errors_test.go
rename to etcd/etcdserver/api/v2http/httptypes/errors_test.go
diff --git a/etcd/etcdserver/api/v2http/httptypes/member.go b/etcd/etcdserver/api/v2http/httptypes/member.go
new file mode 100644
index 00000000000..30c6bb743fd
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/httptypes/member.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httptypes defines how etcd's HTTP API entities are serialized to and
+// deserialized from JSON.
+package httptypes
+
+import (
+ "encoding/json"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+)
+
+type Member struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ PeerURLs []string `json:"peerURLs"`
+ ClientURLs []string `json:"clientURLs"`
+}
+
+type MemberCreateRequest struct {
+ PeerURLs types.URLs
+}
+
+type MemberUpdateRequest struct {
+ MemberCreateRequest
+}
+
+func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
+ s := struct {
+ PeerURLs []string `json:"peerURLs"`
+ }{}
+
+ err := json.Unmarshal(data, &s)
+ if err != nil {
+ return err
+ }
+
+ urls, err := types.NewURLs(s.PeerURLs)
+ if err != nil {
+ return err
+ }
+
+ m.PeerURLs = urls
+ return nil
+}
+
+type MemberCollection []Member
+
+func (c *MemberCollection) MarshalJSON() ([]byte, error) {
+ d := struct {
+ Members []Member `json:"members"`
+ }{
+ Members: []Member(*c),
+ }
+
+ return json.Marshal(d)
+}
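// Illustrative sketch: how MemberCollection serializes - MarshalJSON wraps the
// slice in a {"members": [...]} object, matching what the members handler
// returns. The member values are invented for the example.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
)

func main() {
	c := httptypes.MemberCollection([]httptypes.Member{
		{
			ID:         "8e9e05c52164694d",
			Name:       "node1",
			PeerURLs:   []string{"http://127.0.0.1:2380"},
			ClientURLs: []string{"http://127.0.0.1:2379"},
		},
	})
	out, err := json.Marshal(&c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"members":[{"id":"8e9e05c52164694d",...}]}
}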
diff --git a/etcd/etcdserver/api/v2http/httptypes/member_test.go b/etcd/etcdserver/api/v2http/httptypes/member_test.go
new file mode 100644
index 00000000000..3704256a0ad
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/httptypes/member_test.go
@@ -0,0 +1,135 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptypes
+
+import (
+ "encoding/json"
+ "net/url"
+ "reflect"
+ "testing"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+)
+
+func TestMemberUnmarshal(t *testing.T) {
+ tests := []struct {
+ body []byte
+ wantMember Member
+ wantError bool
+ }{
+ // no URLs, just check ID & Name
+ {
+ body: []byte(`{"id": "c", "name": "dungarees"}`),
+ wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil},
+ },
+
+ // both client and peer URLs
+ {
+ body: []byte(`{"peerURLs": ["http://127.0.0.1:2379"], "clientURLs": ["http://127.0.0.1:2379"]}`),
+ wantMember: Member{
+ PeerURLs: []string{
+ "http://127.0.0.1:2379",
+ },
+ ClientURLs: []string{
+ "http://127.0.0.1:2379",
+ },
+ },
+ },
+
+ // multiple peer URLs
+ {
+ body: []byte(`{"peerURLs": ["http://127.0.0.1:2379", "https://example.com"]}`),
+ wantMember: Member{
+ PeerURLs: []string{
+ "http://127.0.0.1:2379",
+ "https://example.com",
+ },
+ ClientURLs: nil,
+ },
+ },
+
+ // multiple client URLs
+ {
+ body: []byte(`{"clientURLs": ["http://127.0.0.1:2379", "https://example.com"]}`),
+ wantMember: Member{
+ PeerURLs: nil,
+ ClientURLs: []string{
+ "http://127.0.0.1:2379",
+ "https://example.com",
+ },
+ },
+ },
+
+ // invalid JSON
+ {
+ body: []byte(`{"peerU`),
+ wantError: true,
+ },
+ }
+
+ for i, tt := range tests {
+ got := Member{}
+ err := json.Unmarshal(tt.body, &got)
+ if tt.wantError != (err != nil) {
+ t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err)
+ continue
+ }
+
+ if !reflect.DeepEqual(tt.wantMember, got) {
+ t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got)
+ }
+ }
+}
+
+func TestMemberCreateRequestUnmarshal(t *testing.T) {
+ body := []byte(`{"peerURLs": ["http://127.0.0.1:8081", "https://127.0.0.1:8080"]}`)
+ want := MemberCreateRequest{
+ PeerURLs: types.URLs([]url.URL{
+ {Scheme: "http", Host: "127.0.0.1:8081"},
+ {Scheme: "https", Host: "127.0.0.1:8080"},
+ }),
+ }
+
+ var req MemberCreateRequest
+ if err := json.Unmarshal(body, &req); err != nil {
+ t.Fatalf("Unmarshal returned unexpected err=%v", err)
+ }
+
+ if !reflect.DeepEqual(want, req) {
+ t.Fatalf("Failed to unmarshal MemberCreateRequest: want=%#v, got=%#v", want, req)
+ }
+}
+
+func TestMemberCreateRequestUnmarshalFail(t *testing.T) {
+ tests := [][]byte{
+ // invalid JSON
+ []byte(``),
+ []byte(`{`),
+
+ // spot-check validation done in types.NewURLs
+ []byte(`{"peerURLs": "foo"}`),
+ []byte(`{"peerURLs": ["."]}`),
+ []byte(`{"peerURLs": []}`),
+ []byte(`{"peerURLs": ["http://127.0.0.1:2379/foo"]}`),
+ []byte(`{"peerURLs": ["http://127.0.0.1"]}`),
+ }
+
+ for i, tt := range tests {
+ var req MemberCreateRequest
+ if err := json.Unmarshal(tt, &req); err == nil {
+ t.Errorf("#%d: expected err, got nil", i)
+ }
+ }
+}
diff --git a/etcd/etcdserver/api/v2http/metrics.go b/etcd/etcdserver/api/v2http/metrics.go
new file mode 100644
index 00000000000..527b9db6bd9
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/metrics.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ incomingEvents = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "http",
+ Name: "received_total",
+ Help: "Counter of requests received into the system (successfully parsed and authd).",
+ }, []string{"method"})
+
+ failedEvents = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "http",
+ Name: "failed_total",
+ Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).",
+ }, []string{"method", "code"})
+
+ successfulEventsHandlingSec = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "http",
+ Name: "successful_duration_seconds",
+ Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
+
+ // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2
+ // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec
+ Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
+ }, []string{"method"})
+)
+
+func init() {
+ prometheus.MustRegister(incomingEvents)
+ prometheus.MustRegister(failedEvents)
+ prometheus.MustRegister(successfulEventsHandlingSec)
+}
+
+func reportRequestReceived(request etcdserverpb.Request) {
+ incomingEvents.WithLabelValues(methodFromRequest(request)).Inc()
+}
+
+func reportRequestCompleted(request etcdserverpb.Request, startTime time.Time) {
+ method := methodFromRequest(request)
+ successfulEventsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
+}
+
+func reportRequestFailed(request etcdserverpb.Request, err error) {
+ method := methodFromRequest(request)
+ failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc()
+}
+
+func methodFromRequest(request etcdserverpb.Request) string {
+ if request.Method == "GET" && request.Quorum {
+ return "QGET"
+ }
+ return request.Method
+}
+
+func codeFromError(err error) int {
+ if err == nil {
+ return http.StatusInternalServerError
+ }
+ switch e := err.(type) {
+ case *v2error.Error:
+ return e.StatusCode()
+ case *httptypes.HTTPError:
+ return e.Code
+ default:
+ return http.StatusInternalServerError
+ }
+}
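// Illustrative sketch: the bucket layout produced by
// prometheus.ExponentialBuckets(0.0005, 2, 13) used above - upper bounds start
// at 0.5 ms and double up to 0.0005 * 2^12 = 2.048 s, as the comment in the
// histogram definition states.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	for i, b := range prometheus.ExponentialBuckets(0.0005, 2, 13) {
		fmt.Printf("bucket %2d: <= %gs\n", i, b)
	}
}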
diff --git a/etcd/etcdserver/api/v2http/testdata/ca.pem b/etcd/etcdserver/api/v2http/testdata/ca.pem
new file mode 100644
index 00000000000..60cbee3bb4b
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/testdata/ca.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDEjCCAfqgAwIBAgIIYpX+8HgWGfkwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
+AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA1MDBaFw0yMDExMjIwMzA1MDBaMBUx
+EzARBgNVBAMTCmV0Y2QgdGVzdHMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQDa9PkwEwiBD8mB+VIKz5r5gRHnNF4Icj6T6R/RsdatecQe6vU0EU4FXtKZ
+drWnCGlATyrQooqHpb+rDc7CUt3mXrIxrNkcGTMaesF7P0GWxVkyOGSjJMxGBv3e
+bAZknBe4eLMi68L1aT/uYmxcp/B3L2mfdFtc1Gd6mYJpNm1PgilRyIrO0mY5ysIX
+4WHCa3yudAv8HrFbQcw7l7OyKA6uSWg6h07lE3d5jw5YOly+hz0iaRtzhb4tJrYD
+Lm1tehb0nnoLuW6yYblRSoyBVDT50MFVlyvW40Po5WkOXw/wnsnyxWRR4yqU23wq
+quQU0HXJEBLFnT+KbLOQ0EAE35vXAgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjAS
+BgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSbUCGB95ochDrbEZlzGGYuA7xu
+xjAfBgNVHSMEGDAWgBSbUCGB95ochDrbEZlzGGYuA7xuxjANBgkqhkiG9w0BAQsF
+AAOCAQEAardO/SGCu7Snz3YRBUinzpZEUFTFend+FJtBkxBXCao1RvTXg8PBMkza
+LUsaR4mLsGoXLIbNCoIinvVG0QULYCZe11N3l1L0G2g5uhEM4MfJ2rwrMD0o17i+
+nwNRRE3tfKAlWhYQg+4ye36kQVxASPniHjdQgjKYUFTNXdyG6DzuAclaVte9iVw6
+cWl61fB2CZya3+uMtih8t/Kgl2KbMO2PvNByfnDjKmW+v58qHbXyoJZqnpvDn14+
+p2Ox+AvvxYiEiUIvFdWy101QB7NJMCtdwq6oG6OvIOgXzLgitTFSq4kfWDfupQjW
+iFoQ+vWmYhK5ld0nBaiz+JmHuemK7A==
+-----END CERTIFICATE-----
diff --git a/etcd/etcdserver/api/v2http/testdata/otheruser.pem b/etcd/etcdserver/api/v2http/testdata/otheruser.pem
new file mode 100644
index 00000000000..d0c74eb9f8d
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/testdata/otheruser.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDOTCCAiGgAwIBAgIINYpsso1f3SswDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
+AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA4MDBaFw0xNjExMjMwMzA4MDBaMBQx
+EjAQBgNVBAMTCW90aGVydXNlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAPOAUa5GblwIjHTEnox2c/Am9jV1TMvzBuVXxnp2UnNHMNwstAooFrEs/Z+d
+ft5AOsooP6zVuM3eBQa4i9huJbVNDfPU2H94yA89jYfJYUgo7C838V6NjGsCCptQ
+WzkKPNlDbT9xA/7XpIUJ2WltuYDRrjWq8pXQONqTjcg5n4l0JO8xdHJHRUkFQ76F
+1npXeLndgGaP11lqzpYlglEGi5URhzAT1xxQ0hLSe8WNmiCxxkq++C8Gx4sPg9mX
+M94aoJDzZSnoaqDxckbP/7Q0ZKe/fVdCFkd5+jqT4Mt7hwmz9jTCHcVnAz4EKI+t
+rbWgbCfMK6013GotXz7InStVe+MCAwEAAaOBjTCBijAOBgNVHQ8BAf8EBAMCBaAw
+HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYD
+VR0OBBYEFFwMmf+pnaejmri6y1T+lfU+MBq/MB8GA1UdIwQYMBaAFJtQIYH3mhyE
+OtsRmXMYZi4DvG7GMAsGA1UdEQQEMAKCADANBgkqhkiG9w0BAQsFAAOCAQEACOn6
+mec29MTMGPt/EPOmSyhvTKSwH+5YWjCbyUFeoB8puxrJlIphK4mvT+sXp2wzno89
+FVCliO/rJurdErKvyOjlK1QrVGPYIt7Wz9ssAfvlwCyBM8PqgEG8dJN9aAkf2h4r
+Ye+hBh1y6Nnataf7lxe9mqAOvD/7wVIgzjCnMD1q5QSY2Mln3HwVQXtbZFbY363Z
+X9Fk3PUpjJSX9jbEz9kIlT8AJAdxl6GB8Z9B8PrA8qf4Bhk15ICRHxb67EhDrGWV
+8q7ArU2XBqs/+GWpUIMoGKNZv+K+/SksZK1KnzaUvApUCJzt+ac+p8HOgMdvDRgr
+GfVVJqcZgyEmeczy0A==
+-----END CERTIFICATE-----
diff --git a/etcd/etcdserver/api/v2http/testdata/user.pem b/etcd/etcdserver/api/v2http/testdata/user.pem
new file mode 100644
index 00000000000..0fc2108651b
--- /dev/null
+++ b/etcd/etcdserver/api/v2http/testdata/user.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDNDCCAhygAwIBAgIIcQ0DAfgevocwDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE
+AxMKZXRjZCB0ZXN0czAeFw0xNTExMjQwMzA4MDBaFw0xNjExMjMwMzA4MDBaMA8x
+DTALBgNVBAMTBHVzZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQD0
++3Lm1SmUJJLufaFTYz+e5qyQEshNRyeAhXIeZ1aw+yBjslXGZQ3/uGOwnOnGqUeA
+Nidc9ty4NsK6RVppHlezUrBnpl4hws8vHWFKZpU2R6kKL8EYLmg+iVqEBj7XqfAp
+8bJqqZI3KOqLXpRH55mA69KP7VEK9ngTVR/tERSrUPT8jcjwbvhSOqD8Qk07BUDR
+6RpDr94Mnaf+fMGG36Sh7iUl+i4Oh6FFar+7+b0+5Bhs2/6uVsK4A1Z3jqqfSQH8
+q8Wf5h9Ka4aqGSw4ia5G3Uw7Jsl2aDgpJ7uwJo1k8SclbMYnYdhZuo+U+esY/Fai
+YdbjG+AroZ+y9TB8bMlHAgMBAAGjgY0wgYowDgYDVR0PAQH/BAQDAgWgMB0GA1Ud
+JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW
+BBRuTt0lJIVKYaz76aSxl/MQOLRwfDAfBgNVHSMEGDAWgBSbUCGB95ochDrbEZlz
+GGYuA7xuxjALBgNVHREEBDACggAwDQYJKoZIhvcNAQELBQADggEBABLRWZm+Lgjs
+c5qDXbgOJW2pR630syY8ixR9c6HvzPVJim8mFioMX+xrlbOC6BmOUlOb9j83bTKn
+aOg/0xlpxNbd8QYzgRxZmHZLULPdiNeeRvIzsrzrH88+inrmZhRXRVcHjdO6CG6t
+hCdDdRiNU6GkF7dPna0xNcEOKe2wUfzd1ZtKOqzi1w+fKjSeMplZomeWgP4WRvkh
+JJ/0ujlMMckgyTxRh8EEaJ35OnpXX7EdipoWhOMmiUnlPqye2icC8Y+CMdZsrod6
+nkoEQnXDCLf/Iv0qj7B9iKbxn7t3QDVxY4UILUReDuD8yrGULlGOl//aY/T3pkZ6
+R5trduZhI3o=
+-----END CERTIFICATE-----
diff --git a/server/etcdserver/api/v2stats/leader.go b/etcd/etcdserver/api/v2stats/leader.go
similarity index 100%
rename from server/etcdserver/api/v2stats/leader.go
rename to etcd/etcdserver/api/v2stats/leader.go
diff --git a/etcd/etcdserver/api/v2stats/queue.go b/etcd/etcdserver/api/v2stats/queue.go
new file mode 100644
index 00000000000..60b7342fbf1
--- /dev/null
+++ b/etcd/etcdserver/api/v2stats/queue.go
@@ -0,0 +1,108 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ queueCapacity = 200
+)
+
+// RequestStats represent the stats for a request.
+// It encapsulates the sending time and the size of the request.
+type RequestStats struct {
+ SendingTime time.Time
+ Size int
+}
+
+type statsQueue struct {
+ items [queueCapacity]*RequestStats
+ size int
+ front int
+ back int
+ totalReqSize int
+ rwl sync.RWMutex
+}
+
+func (q *statsQueue) Len() int {
+ return q.size
+}
+
+func (q *statsQueue) ReqSize() int {
+ return q.totalReqSize
+}
+
+// FrontAndBack gets the front and back elements in the queue
+// We must grab front and back together with the protection of the lock
+func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) {
+ q.rwl.RLock()
+ defer q.rwl.RUnlock()
+ if q.size != 0 {
+ return q.items[q.front], q.items[q.back]
+ }
+ return nil, nil
+}
+
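+// Insert appends p to the ring buffer. Once the buffer holds queueCapacity (200)
+// entries, the oldest entry is dropped and its size subtracted from totalReqSize,
+// so the queue always describes at most the last 200 requests.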
+func (q *statsQueue) Insert(p *RequestStats) {
+ q.rwl.Lock()
+ defer q.rwl.Unlock()
+
+ q.back = (q.back + 1) % queueCapacity // 200
+
+ if q.size == queueCapacity { // dequeue
+ q.totalReqSize -= q.items[q.front].Size
+ q.front = (q.back + 1) % queueCapacity
+ } else {
+ q.size++
+ }
+
+ q.items[q.back] = p
+ q.totalReqSize += q.items[q.back].Size
+}
+
+// Rate function returns the package rate and byte rate
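+// Both rates are extrapolated from the window between the oldest and newest queued
+// entries; for example, 150 queued requests spanning 0.5s give a package rate of
+// 300 req/s. If the newest entry is more than one second old, the queue is cleared
+// and both rates are reported as 0.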
+func (q *statsQueue) Rate() (float64, float64) {
+ front, back := q.frontAndBack()
+
+ if front == nil || back == nil {
+ return 0, 0
+ }
+
+ if time.Since(back.SendingTime) > time.Second {
+ q.Clear()
+ return 0, 0
+ }
+
+ sampleDuration := back.SendingTime.Sub(front.SendingTime)
+
+ pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)
+
+ br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second)
+
+ return pr, br
+}
+
+// Clear function clear up the statsQueue
+func (q *statsQueue) Clear() {
+ q.rwl.Lock()
+ defer q.rwl.Unlock()
+ q.back = -1
+ q.front = 0
+ q.size = 0
+ q.totalReqSize = 0
+}
diff --git a/etcd/etcdserver/api/v2stats/server.go b/etcd/etcdserver/api/v2stats/server.go
new file mode 100644
index 00000000000..8ef6b075a70
--- /dev/null
+++ b/etcd/etcdserver/api/v2stats/server.go
@@ -0,0 +1,134 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "encoding/json"
+ "log"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+)
+
+// ServerStats encapsulates various statistics about an EtcdServer and its communication with the other members of the cluster
+type ServerStats struct {
+ serverStats
+ sync.Mutex
+}
+
+func NewServerStats(name, id string) *ServerStats {
+ ss := &ServerStats{
+ serverStats: serverStats{
+ Name: name,
+ ID: id,
+ },
+ }
+ now := time.Now()
+ ss.StartTime = now
+ ss.LeaderInfo.StartTime = now
+ ss.sendRateQueue = &statsQueue{back: -1}
+ ss.recvRateQueue = &statsQueue{back: -1}
+ return ss
+}
+
+type serverStats struct {
+ Name string `json:"name"` // the name of this member
+ ID string `json:"id"` // unique identifier of this member
+ State raft.StateType `json:"state"` // this member's role in the Raft protocol, Leader or Follower
+ StartTime time.Time `json:"startTime"` // start time of this etcd server
+ LeaderInfo struct { //
+ Name string `json:"leader"` // name of the current leader
+ Uptime string `json:"uptime"` // how long the current cluster leader has held the role
+ StartTime time.Time `json:"startTime"` // time of the leader's first communication
+ } `json:"leaderInfo"` //
+ sendRateQueue *statsQueue // queue of sent messages
+ SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"` // number of append requests this member has sent
+ SendingPkgRate float64 `json:"sendPkgRate,omitempty"` // requests sent per second by this member (followers only; not reported by single-member clusters)
+ SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"` // bytes sent per second by this member (followers only; not reported by single-member clusters)
+ recvRateQueue *statsQueue // queue of received messages
+ RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"` // number of append requests this member has processed
+ RecvingPkgRate float64 `json:"recvPkgRate,omitempty"` // requests received per second by this member (followers only)
+ RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"` // bytes received per second by this member (followers only)
+}
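+
+// Note: the sending/receiving rates above are derived from the two statsQueues:
+// each queue keeps at most the last 200 requests, and Rate reports 0 once the
+// most recent entry is older than one second (see queue.go).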
+
+func (ss *ServerStats) JSON() []byte {
+ ss.Lock()
+ stats := ss.serverStats
+ stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
+ stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
+ stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
+ ss.Unlock()
+ b, err := json.Marshal(stats)
+ // TODO(jonboulle): appropriate error handling?
+ if err != nil {
+ log.Printf("stats: error marshalling etcd stats: %v", err)
+ }
+ return b
+}
+
+// RecvAppendReq updates the ServerStats after an AppendRequest is received from the leader.
+func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
+ ss.Lock()
+ defer ss.Unlock()
+
+ now := time.Now()
+
+ ss.State = raft.StateFollower
+ if leader != ss.LeaderInfo.Name {
+ ss.LeaderInfo.Name = leader
+ ss.LeaderInfo.StartTime = now
+ }
+
+ ss.recvRateQueue.Insert(
+ &RequestStats{
+ SendingTime: now,
+ Size: reqSize,
+ },
+ )
+ ss.RecvAppendRequestCnt++
+}
+
+// SendAppendReq updates the ServerStats in response to an AppendRequest
+// being sent by this etcd
+func (ss *ServerStats) SendAppendReq(reqSize int) {
+ ss.Lock()
+ defer ss.Unlock()
+
+ ss.becomeLeader()
+
+ ss.sendRateQueue.Insert(
+ &RequestStats{
+ SendingTime: time.Now(),
+ Size: reqSize,
+ },
+ )
+
+ ss.SendAppendRequestCnt++
+}
+
+func (ss *ServerStats) BecomeLeader() {
+ ss.Lock()
+ defer ss.Unlock()
+ ss.becomeLeader()
+}
+
+func (ss *ServerStats) becomeLeader() {
+ if ss.State != raft.StateLeader {
+ ss.State = raft.StateLeader
+ ss.LeaderInfo.Name = ss.ID
+ ss.LeaderInfo.StartTime = time.Now()
+ }
+}
diff --git a/etcd/etcdserver/api/v2stats/stats.go b/etcd/etcdserver/api/v2stats/stats.go
new file mode 100644
index 00000000000..20d7dd0e115
--- /dev/null
+++ b/etcd/etcdserver/api/v2stats/stats.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2stats defines a standard interface for etcd cluster statistics.
+package v2stats
+
+type Stats interface {
+ // SelfStats returns the struct representing statistics of this etcd
+ SelfStats() []byte
+ // LeaderStats returns the statistics of all followers in the cluster
+ // if this etcd is leader. Otherwise, nil is returned.
+ LeaderStats() []byte
+ // StoreStats returns statistics of the store backing this EtcdServer
+ StoreStats() []byte
+}
diff --git a/etcd/etcdserver/api/v2store/event.go b/etcd/etcdserver/api/v2store/event.go
new file mode 100644
index 00000000000..34f9a2df70a
--- /dev/null
+++ b/etcd/etcdserver/api/v2store/event.go
@@ -0,0 +1,72 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+const (
+ Get = "get"
+ Create = "create"
+ Set = "set"
+ Update = "update"
+ Delete = "delete"
+ CompareAndSwap = "compareAndSwap"
+ CompareAndDelete = "compareAndDelete"
+ Expire = "expire"
+)
+
+type Event struct {
+ Action string `json:"action"`
+ NodeExtern *NodeExtern `json:"node,omitempty"`
+ PrevNode *NodeExtern `json:"prevNode,omitempty"`
+ EtcdIndex uint64 `json:"-"`
+ Refresh bool `json:"refresh,omitempty"`
+}
+
+// newEvent builds a node-change event, e.g. for node creation or deletion.
+func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event {
+ n := &NodeExtern{
+ Key: key,
+ ModifiedIndex: modifiedIndex,
+ CreatedIndex: createdIndex,
+ }
+
+ return &Event{
+ Action: action,
+ NodeExtern: n,
+ }
+}
+
+func (e *Event) IsCreated() bool {
+ if e.Action == Create {
+ return true
+ }
+ return e.Action == Set && e.PrevNode == nil
+}
+
+func (e *Event) Index() uint64 {
+ return e.NodeExtern.ModifiedIndex
+}
+
+func (e *Event) Clone() *Event {
+ return &Event{
+ Action: e.Action,
+ EtcdIndex: e.EtcdIndex,
+ NodeExtern: e.NodeExtern.Clone(),
+ PrevNode: e.PrevNode.Clone(),
+ }
+}
+
+func (e *Event) SetRefresh() {
+ e.Refresh = true
+}
diff --git a/server/etcdserver/api/v2store/event_history.go b/etcd/etcdserver/api/v2store/event_history.go
similarity index 94%
rename from server/etcdserver/api/v2store/event_history.go
rename to etcd/etcdserver/api/v2store/event_history.go
index c9bcdca0513..f43638b630a 100644
--- a/server/etcdserver/api/v2store/event_history.go
+++ b/etcd/etcdserver/api/v2store/event_history.go
@@ -20,9 +20,10 @@ import (
"strings"
"sync"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
)
+// EventHistory keeps a history of recent events
type EventHistory struct {
Queue eventQueue
StartIndex uint64
@@ -79,7 +80,7 @@ func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event,
e := eh.Queue.Events[i]
if !e.Refresh {
- ok := e.Node.Key == key
+ ok := e.NodeExtern.Key == key
if recursive {
// add tailing slash
@@ -88,7 +89,7 @@ func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event,
nkey = nkey + "/"
}
- ok = ok || strings.HasPrefix(e.Node.Key, nkey)
+ ok = ok || strings.HasPrefix(e.NodeExtern.Key, nkey)
}
if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir {
@@ -125,5 +126,4 @@ func (eh *EventHistory) clone() *EventHistory {
Queue: clonedQueue,
LastIndex: eh.LastIndex,
}
-
}
diff --git a/server/etcdserver/api/v2store/event_queue.go b/etcd/etcdserver/api/v2store/event_queue.go
similarity index 95%
rename from server/etcdserver/api/v2store/event_queue.go
rename to etcd/etcdserver/api/v2store/event_queue.go
index 7ea03de8c9a..aa2a645d6ff 100644
--- a/server/etcdserver/api/v2store/event_queue.go
+++ b/etcd/etcdserver/api/v2store/event_queue.go
@@ -26,7 +26,7 @@ func (eq *eventQueue) insert(e *Event) {
eq.Events[eq.Back] = e
eq.Back = (eq.Back + 1) % eq.Capacity
- if eq.Size == eq.Capacity { //dequeue
+ if eq.Size == eq.Capacity { // dequeue
eq.Front = (eq.Front + 1) % eq.Capacity
} else {
eq.Size++
diff --git a/etcd/etcdserver/api/v2store/node.go b/etcd/etcdserver/api/v2store/node.go
new file mode 100644
index 00000000000..67d44b638d6
--- /dev/null
+++ b/etcd/etcdserver/api/v2store/node.go
@@ -0,0 +1,359 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "path"
+ "sort"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+
+ "github.com/jonboulle/clockwork"
+)
+
+// Possible results of the node Compare function
+const (
+ CompareMatch = iota // index and value both match
+ CompareIndexNotMatch // index does not match
+ CompareValueNotMatch // value does not match
+ CompareNotMatch // neither index nor value matches
+)
+
+var Permanent time.Time // the zero time value, used to mark nodes that never expire
+
+// node is the basic element in the store system.
+// A key-value pair will have a string value
+// A directory will have a children map
+type node struct {
+ Path string
+ CreatedIndex uint64
+ ModifiedIndex uint64
+ Parent *node `json:"-"` // this field must not be encoded; doing so would create a cyclic dependency
+ ExpireTime time.Time
+ Value string // value of a key-value pair
+ Children map[string]*node // children, when the node is a directory
+ store *store // reference to the store this node is attached to
+}
+
+// newKV creates a Key-Value pair
+func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+ return &node{
+ Path: nodePath,
+ CreatedIndex: createdIndex,
+ ModifiedIndex: createdIndex,
+ Parent: parent,
+ store: store,
+ ExpireTime: expireTime,
+ Value: value,
+ }
+}
+
+// Write function set the value of the node to the given value.
+// If the receiver node is a directory, a "Not A File" error will be returned.
+func (n *node) Write(value string, index uint64) *v2error.Error {
+ if n.IsDir() {
+ return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
+ }
+
+ n.Value = value
+ n.ModifiedIndex = index
+
+ return nil
+}
+
+// Remove removes the node and cleans up all the data it contains
+func (n *node) Remove(dir, recursive bool, callback func(path string)) *v2error.Error {
+ if !n.IsDir() { // key-value pair
+ _, name := path.Split(n.Path)
+
+ // find its parent and remove the node from the map
+ if n.Parent != nil && n.Parent.Children[name] == n {
+ delete(n.Parent.Children, name)
+ }
+
+ if callback != nil {
+ callback(n.Path)
+ }
+
+ if !n.IsPermanent() {
+ n.store.ttlKeyHeap.remove(n)
+ }
+
+ return nil
+ }
+
+ if !dir {
+ // cannot delete a directory without dir set to true
+ return v2error.NewError(v2error.EcodeNotFile, n.Path, n.store.CurrentIndex)
+ }
+
+ if len(n.Children) != 0 && !recursive {
+ // cannot delete a directory if it is not empty and the operation
+ // is not recursive
+ return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex)
+ }
+
+ for _, child := range n.Children { // delete all children
+ child.Remove(true, true, callback)
+ }
+
+ // delete self
+ _, name := path.Split(n.Path)
+ if n.Parent != nil && n.Parent.Children[name] == n {
+ delete(n.Parent.Children, name)
+
+ if callback != nil {
+ callback(n.Path)
+ }
+
+ if !n.IsPermanent() {
+ n.store.ttlKeyHeap.remove(n)
+ }
+ }
+
+ return nil
+}
+
+func (n *node) UpdateTTL(expireTime time.Time) {
+ if !n.IsPermanent() {
+ if expireTime.IsZero() {
+ // from ttl to permanent
+ n.ExpireTime = expireTime
+ // remove from ttl heap
+ n.store.ttlKeyHeap.remove(n)
+ return
+ }
+
+ // update ttl
+ n.ExpireTime = expireTime
+ // update ttl heap
+ n.store.ttlKeyHeap.update(n)
+ return
+ }
+
+ if expireTime.IsZero() {
+ return
+ }
+
+ // from permanent to ttl
+ n.ExpireTime = expireTime
+ // push into ttl heap
+ n.store.ttlKeyHeap.push(n)
+}
+
+// Compare function compares node index and value with provided ones.
+// second result value explains result and equals to one of Compare.. constants
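+// A zero prevIndex or an empty prevValue means that dimension is not checked;
+// for example, Compare("", 5) succeeds for any value as long as ModifiedIndex == 5.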
+func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
+ indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex
+ valueMatch := prevValue == "" || n.Value == prevValue
+ ok = valueMatch && indexMatch
+ switch {
+ case valueMatch && indexMatch:
+ which = CompareMatch
+ case indexMatch && !valueMatch:
+ which = CompareValueNotMatch
+ case valueMatch && !indexMatch:
+ which = CompareIndexNotMatch
+ default:
+ which = CompareNotMatch
+ }
+ return ok, which
+}
+
+// Clone function clone the node recursively and return the new node.
+// If the node is a directory, it will clone all the content under this directory.
+// If the node is a key-value pair, it will clone the pair.
+func (n *node) Clone() *node {
+ if !n.IsDir() {
+ newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime)
+ newkv.ModifiedIndex = n.ModifiedIndex
+ return newkv
+ }
+
+ clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime)
+ clone.ModifiedIndex = n.ModifiedIndex
+
+ for key, child := range n.Children {
+ clone.Children[key] = child.Clone()
+ }
+
+ return clone
+}
+
+// recoverAndclean function help to do recovery.
+// Two things need to be done: 1. recovery structure; 2. delete expired nodes
+//
+// If the node is a directory, it will help recover children's parent pointer and recursively
+// call this function on its children.
+// We check the expire last since we need to recover the whole structure first and add all the
+// notifications into the event history.
+func (n *node) recoverAndclean() {
+ if n.IsDir() {
+ for _, child := range n.Children {
+ child.Parent = n
+ child.store = n.store
+ child.recoverAndclean()
+ }
+ }
+
+ if !n.ExpireTime.IsZero() {
+ n.store.ttlKeyHeap.push(n)
+ }
+}
+
+// -------------------------------------- OVER -----------------------------------------------
+
+// List returns all nodes under the current node
+func (n *node) List() ([]*node, *v2error.Error) {
+ if !n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex)
+ }
+
+ nodes := make([]*node, len(n.Children))
+
+ i := 0
+ for _, node := range n.Children {
+ nodes[i] = node
+ i++
+ }
+
+ return nodes, nil
+}
+
+// GetChild returns the named child of this directory node
+func (n *node) GetChild(name string) (*node, *v2error.Error) {
+ if !n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotDir, n.Path, n.store.CurrentIndex)
+ }
+
+ child, ok := n.Children[name]
+
+ if ok {
+ return child, nil
+ }
+
+ return nil, nil
+}
+
+// Add adds a child node
+func (n *node) Add(child *node) *v2error.Error {
+ if !n.IsDir() { // /0/members/8e9e05c52164694d
+ return v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex)
+ }
+
+ _, name := path.Split(child.Path)
+
+ if _, ok := n.Children[name]; ok {
+ return v2error.NewError(v2error.EcodeNodeExist, "", n.store.CurrentIndex)
+ }
+
+ n.Children[name] = child
+
+ return nil
+}
+
+// Repr builds the external representation of the node, recursively if requested, filling in expiration and remaining TTL and skipping hidden nodes
+func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern {
+ if n.IsDir() {
+ node := &NodeExtern{
+ Key: n.Path,
+ Dir: true,
+ ModifiedIndex: n.ModifiedIndex,
+ CreatedIndex: n.CreatedIndex,
+ }
+ node.Expiration, node.TTL = n.expirationAndTTL(clock) // expiration time and remaining TTL
+ if !recursive {
+ return node
+ }
+ children, _ := n.List()
+ node.ExternNodes = make(NodeExterns, len(children))
+ i := 0
+ for _, child := range children {
+ if child.IsHidden() { // skip hidden nodes
+ continue
+ }
+ node.ExternNodes[i] = child.Repr(recursive, sorted, clock)
+ i++
+ }
+ node.ExternNodes = node.ExternNodes[:i]
+ if sorted {
+ sort.Sort(node.ExternNodes)
+ }
+ return node
+ }
+
+ // since n.Value could be changed later, copy the value out
+ value := n.Value
+ node := &NodeExtern{
+ Key: n.Path,
+ Value: &value,
+ ModifiedIndex: n.ModifiedIndex,
+ CreatedIndex: n.CreatedIndex,
+ }
+ node.Expiration, node.TTL = n.expirationAndTTL(clock) // expiration time and remaining TTL
+ return node
+}
+
+// expirationAndTTL returns the expiration time and the remaining TTL of the node.
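+// The remaining TTL is rounded up to whole seconds, so e.g. 1.2s remaining is
+// reported as a TTL of 2.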
+func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) {
+ if !n.IsPermanent() {
+ ttlN := n.ExpireTime.Sub(clock.Now()) // time remaining until expiration
+ ttl := ttlN / time.Second
+ if (ttlN % time.Second) > 0 {
+ ttl++ // round up to the next whole second
+ }
+ t := n.ExpireTime.UTC()
+ return &t, int64(ttl)
+ }
+ return nil, 0
+}
+
+// newDir creates a directory node
+func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+ return &node{
+ Path: nodePath,
+ CreatedIndex: createdIndex,
+ ModifiedIndex: createdIndex,
+ Parent: parent,
+ ExpireTime: expireTime,
+ Children: make(map[string]*node),
+ store: store,
+ }
+}
+
+// IsHidden reports whether the node name starts with "_", e.g. /0/members/_hidden
+func (n *node) IsHidden() bool {
+ _, name := path.Split(n.Path)
+ return name[0] == '_'
+}
+
+// IsPermanent reports whether the node is a permanent node.
+func (n *node) IsPermanent() bool {
+ // an uninitialized (zero) time.Time marks the node as permanent
+ return n.ExpireTime.IsZero()
+}
+
+func (n *node) IsDir() bool {
+ return n.Children != nil
+}
+
+func (n *node) Read() (string, *v2error.Error) {
+ if n.IsDir() {
+ return "", v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
+ }
+
+ return n.Value, nil
+}
diff --git a/etcd/etcdserver/api/v2store/over_node_extern.go b/etcd/etcdserver/api/v2store/over_node_extern.go
new file mode 100644
index 00000000000..4f045282060
--- /dev/null
+++ b/etcd/etcdserver/api/v2store/over_node_extern.go
@@ -0,0 +1,112 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "sort"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+)
+
+var _ node
+
+// NodeExtern is the external representation of an internal node, with additional fields: PrevValue is the node's previous value and TTL is the time to live in seconds
+type NodeExtern struct {
+ Key string `json:"key,omitempty"` // /0/members/8e9e05c52164694d/raftAttributes
+ Value *string `json:"value,omitempty"`
+ Dir bool `json:"dir,omitempty"`
+ Expiration *time.Time `json:"expiration,omitempty"`
+ TTL int64 `json:"ttl,omitempty"`
+ ExternNodes NodeExterns `json:"nodes,omitempty"`
+ ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
+ CreatedIndex uint64 `json:"createdIndex,omitempty"`
+}
+
+// &v2store.NodeExtern{Key: "/1234", ExternNodes: []*v2store.NodeExtern{
+// {Key: "/1234/attributes", Value: stringp(`{"name":"node1","clientURLs":null}`)},
+// {Key: "/1234/raftAttributes", Value: stringp(`{"peerURLs":null}`)},
+// }}
+
+// loadInternalNode loads the node, mainly copying its data into eNode; e.g. n: /0/members
+func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) {
+ if n.IsDir() {
+ eNode.Dir = true
+ children, _ := n.List()
+ eNode.ExternNodes = make(NodeExterns, len(children))
+ // we do not use the index of the children slice directly, because hidden nodes must be skipped
+ i := 0
+ for _, child := range children {
+ if child.IsHidden() {
+ continue
+ }
+ eNode.ExternNodes[i] = child.Repr(recursive, sorted, clock)
+ i++
+ }
+ // trim the slots left empty by skipped hidden nodes
+ eNode.ExternNodes = eNode.ExternNodes[:i]
+ if sorted {
+ sort.Sort(eNode.ExternNodes)
+ }
+ } else {
+ value, _ := n.Read()
+ eNode.Value = &value
+ }
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock) // expiration time and remaining TTL
+}
+
+func (eNode *NodeExtern) Clone() *NodeExtern {
+ if eNode == nil {
+ return nil
+ }
+ nn := &NodeExtern{
+ Key: eNode.Key,
+ Dir: eNode.Dir,
+ TTL: eNode.TTL,
+ ModifiedIndex: eNode.ModifiedIndex,
+ CreatedIndex: eNode.CreatedIndex,
+ }
+ if eNode.Value != nil {
+ s := *eNode.Value
+ nn.Value = &s
+ }
+ if eNode.Expiration != nil {
+ t := *eNode.Expiration
+ nn.Expiration = &t
+ }
+ if eNode.ExternNodes != nil {
+ nn.ExternNodes = make(NodeExterns, len(eNode.ExternNodes))
+ for i, n := range eNode.ExternNodes {
+ nn.ExternNodes[i] = n.Clone()
+ }
+ }
+ return nn
+}
+
+type NodeExterns []*NodeExtern
+
+// interfaces for sorting
+
+func (ns NodeExterns) Len() int {
+ return len(ns)
+}
+
+func (ns NodeExterns) Less(i, j int) bool {
+ return ns[i].Key < ns[j].Key
+}
+
+func (ns NodeExterns) Swap(i, j int) {
+ ns[i], ns[j] = ns[j], ns[i]
+}
diff --git a/etcd/etcdserver/api/v2store/over_watcher.go b/etcd/etcdserver/api/v2store/over_watcher.go
new file mode 100644
index 00000000000..1e51c908e77
--- /dev/null
+++ b/etcd/etcdserver/api/v2store/over_watcher.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+type Watcher interface {
+ EventChan() chan *Event
+ StartIndex() uint64 // the EtcdIndex at the time the watch was created
+ Remove()
+}
+
+type watcher struct {
+ eventChan chan *Event // events are delivered to the watcher through this channel after registration
+ stream bool // streaming watch (true) or one-shot watch (false)
+ recursive bool // whether the watch is recursive
+ sinceIndex uint64 // only events after this index are delivered
+ startIndex uint64
+ hub *watcherHub
+ removed bool // whether the watcher has been removed
+ remove func() // callback invoked when the watcher is removed
+}
+
+func (w *watcher) EventChan() chan *Event {
+ return w.eventChan
+}
+
+func (w *watcher) StartIndex() uint64 {
+ return w.startIndex
+}
+
+// notify notifies the watcher. It returns true if the watcher is interested in the given path.
+func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
+ // originalPath corresponds to case 1 below; deleted corresponds to case 3
+
+ // A watcher is interested in a path in three cases, all under the condition that the event happened after the watcher's sinceIndex:
+ // 1. The event happened at the exact path the watcher is watching. For example, if the watcher watches "/foo" and the event happens at "/foo", the watcher must be interested.
+ // 2. The watcher is recursive and is interested in events under its watch path. For example, if watcher A watches "/foo" recursively, it is interested in events at "/foo/bar".
+ // 3. When a directory is deleted, every watcher watching a file under it must be force-notified. For example, a watcher watching "/foo/bar" should be notified when "/foo" is deleted, even though "/foo" is not the path it is watching.
+ if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
+ // We must not block here: if eventChan is full, etcd would hang. eventChan fills up when notifications arrive faster than they can be sent out; when that happens the watcher is removed and its channel closed.
+ select {
+ case w.eventChan <- e:
+ default:
+ w.remove() // remove the watcher and close its channel
+ }
+ return true
+ }
+ return false
+}
+
+// Remove removes the watcher
+func (w *watcher) Remove() {
+ w.hub.mutex.Lock()
+ defer w.hub.mutex.Unlock()
+
+ close(w.eventChan)
+ if w.remove != nil {
+ w.remove()
+ }
+}
+
+// nopWatcher is a watcher that receives nothing, always blocking.
+type nopWatcher struct{}
+
+func NewNopWatcher() Watcher { return &nopWatcher{} }
+func (w *nopWatcher) EventChan() chan *Event { return nil }
+func (w *nopWatcher) StartIndex() uint64 { return 0 }
+func (w *nopWatcher) Remove() {}
diff --git a/etcd/etcdserver/api/v2store/stats.go b/etcd/etcdserver/api/v2store/stats.go
new file mode 100644
index 00000000000..10e5392a23c
--- /dev/null
+++ b/etcd/etcdserver/api/v2store/stats.go
@@ -0,0 +1,124 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "encoding/json"
+ "sync/atomic"
+)
+
+const (
+ SetSuccess = iota
+ SetFail
+ DeleteSuccess
+ DeleteFail
+ CreateSuccess
+ CreateFail
+ UpdateSuccess
+ UpdateFail
+ CompareAndSwapSuccess
+ CompareAndSwapFail
+ GetSuccess
+ GetFail
+ ExpireCount
+ CompareAndDeleteSuccess
+ CompareAndDeleteFail
+)
+
+// Stats records per-operation request counters
+type Stats struct {
+ GetSuccess uint64 `json:"getsSuccess"` // number of get requests
+ GetFail uint64 `json:"getsFail"`
+ SetSuccess uint64 `json:"setsSuccess"` // number of set requests
+ SetFail uint64 `json:"setsFail"`
+ DeleteSuccess uint64 `json:"deleteSuccess"` // number of delete requests
+ DeleteFail uint64 `json:"deleteFail"`
+ UpdateSuccess uint64 `json:"updateSuccess"` // number of update requests
+ UpdateFail uint64 `json:"updateFail"`
+ CreateSuccess uint64 `json:"createSuccess"` // number of create requests
+ CreateFail uint64 `json:"createFail"`
+ CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"` // number of testAndSet (compare-and-swap) requests
+ CompareAndSwapFail uint64 `json:"compareAndSwapFail"`
+ CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"` // number of compareAndDelete requests
+ CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"`
+ ExpireCount uint64 `json:"expireCount"`
+ Watchers uint64 `json:"watchers"`
+}
+
+func newStats() *Stats {
+ s := new(Stats)
+ return s
+}
+
+func (s *Stats) clone() *Stats {
+ return &Stats{
+ GetSuccess: atomic.LoadUint64(&s.GetSuccess),
+ GetFail: atomic.LoadUint64(&s.GetFail),
+ SetSuccess: atomic.LoadUint64(&s.SetSuccess),
+ SetFail: atomic.LoadUint64(&s.SetFail),
+ DeleteSuccess: atomic.LoadUint64(&s.DeleteSuccess),
+ DeleteFail: atomic.LoadUint64(&s.DeleteFail),
+ UpdateSuccess: atomic.LoadUint64(&s.UpdateSuccess),
+ UpdateFail: atomic.LoadUint64(&s.UpdateFail),
+ CreateSuccess: atomic.LoadUint64(&s.CreateSuccess),
+ CreateFail: atomic.LoadUint64(&s.CreateFail),
+ CompareAndSwapSuccess: atomic.LoadUint64(&s.CompareAndSwapSuccess),
+ CompareAndSwapFail: atomic.LoadUint64(&s.CompareAndSwapFail),
+ CompareAndDeleteSuccess: atomic.LoadUint64(&s.CompareAndDeleteSuccess),
+ CompareAndDeleteFail: atomic.LoadUint64(&s.CompareAndDeleteFail),
+ ExpireCount: atomic.LoadUint64(&s.ExpireCount),
+ Watchers: atomic.LoadUint64(&s.Watchers),
+ }
+}
+
+func (s *Stats) toJson() []byte {
+ b, _ := json.Marshal(s)
+ return b
+}
+
+func (s *Stats) Inc(field int) {
+ switch field {
+ case SetSuccess:
+ atomic.AddUint64(&s.SetSuccess, 1)
+ case SetFail:
+ atomic.AddUint64(&s.SetFail, 1)
+ case CreateSuccess:
+ atomic.AddUint64(&s.CreateSuccess, 1)
+ case CreateFail:
+ atomic.AddUint64(&s.CreateFail, 1)
+ case DeleteSuccess:
+ atomic.AddUint64(&s.DeleteSuccess, 1)
+ case DeleteFail:
+ atomic.AddUint64(&s.DeleteFail, 1)
+ case GetSuccess:
+ atomic.AddUint64(&s.GetSuccess, 1)
+ case GetFail:
+ atomic.AddUint64(&s.GetFail, 1)
+ case UpdateSuccess:
+ atomic.AddUint64(&s.UpdateSuccess, 1)
+ case UpdateFail:
+ atomic.AddUint64(&s.UpdateFail, 1)
+ case CompareAndSwapSuccess:
+ atomic.AddUint64(&s.CompareAndSwapSuccess, 1)
+ case CompareAndSwapFail:
+ atomic.AddUint64(&s.CompareAndSwapFail, 1)
+ case CompareAndDeleteSuccess:
+ atomic.AddUint64(&s.CompareAndDeleteSuccess, 1)
+ case CompareAndDeleteFail:
+ atomic.AddUint64(&s.CompareAndDeleteFail, 1)
+ case ExpireCount:
+ atomic.AddUint64(&s.ExpireCount, 1)
+ }
+}
diff --git a/etcd/etcdserver/api/v2store/store.go b/etcd/etcdserver/api/v2store/store.go
new file mode 100644
index 00000000000..dc3300fc0b5
--- /dev/null
+++ b/etcd/etcdserver/api/v2store/store.go
@@ -0,0 +1,741 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+)
+
+// The default version to set when the store is first initialized.
+const defaultVersion = 2
+
+var minExpireTime time.Time
+
+func init() {
+ minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
+}
+
+// Store is the etcd v2 store interface. The store has the following characteristics:
+// 1. Key-value storage, generally faster than a relational database.
+// 2. Supports both dynamic (in-memory) and static (on-disk) storage.
+// 3. Distributed storage that can be assembled into a multi-member cluster.
+// 4. Data is organized like a directory tree:
+//  1) Only leaf nodes actually store data; they behave like files.
+//  2) The parent of a leaf node is always a directory, and directories cannot store data.
+type Store interface {
+ Version() int
+ Index() uint64
+ Get(nodePath string, recursive, sorted bool) (*Event, error)
+ Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error)
+ Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error)
+ Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error)
+ CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, value string, expireOpts TTLOptionSet) (*Event, error)
+ Delete(nodePath string, dir, recursive bool) (*Event, error)
+ CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error)
+ Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error)
+ Save() ([]byte, error)
+ Recovery(state []byte) error
+ Clone() Store
+ SaveNoCopy() ([]byte, error)
+ JsonStats() []byte
+ DeleteExpiredKeys(cutoff time.Time)
+ HasTTLKeys() bool
+}
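+
+// A minimal usage sketch (illustrative only, not part of the package; error
+// handling omitted):
+//
+//	s := New("/0")
+//	_, _ = s.Create("/0/foo", false, "bar", false, TTLOptionSet{})
+//	ev, _ := s.Get("/0/foo", false, false)
+//	fmt.Println(*ev.NodeExtern.Value) // "bar"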
+
+type TTLOptionSet struct {
+ ExpireTime time.Time // expiration time of the key
+ Refresh bool // when true, only the TTL is refreshed: the existing value is kept and watchers are not notified (the event is only recorded in the history)
+}
+
+type store struct {
+ Root *node // root node
+ WatcherHub *watcherHub // watchers for all keys in this store
+ CurrentIndex uint64 // index corresponding to the stored content
+ Stats *Stats
+ CurrentVersion int // version of the latest data
+ ttlKeyHeap *ttlKeyHeap // min-heap ordered by expiration time; also used when recovering data (rebuilt manually)
+ worldLock sync.RWMutex // world lock that stops the whole store
+ clock clockwork.Clock //
+ readonlySet types.Set // paths that are read-only
+}
+
+// New creates a store; the given namespaces are created as its initial directories.
+func New(namespaces ...string) Store {
+ s := newStore(namespaces...)
+ s.clock = clockwork.NewRealClock()
+ return s
+}
+
+// newStore builds the store; the namespaces (e.g. "/0", "/1") become its initial directories
+func newStore(namespaces ...string) *store {
+ s := new(store)
+ s.CurrentVersion = defaultVersion // 2
+ s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent) // index 0, permanent; the first directory created in etcd is the root "/"
+ for _, namespace := range namespaces {
+ s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent))
+ }
+ s.Stats = newStats()
+ s.WatcherHub = newWatchHub(1000)
+ s.ttlKeyHeap = newTtlKeyHeap()
+ s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...)
+ return s
+}
+
+// Set creates or replaces the node at nodePath.
+func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(SetSuccess)
+ return
+ }
+
+ s.Stats.Inc(SetFail)
+ }()
+
+ // Get prevNode value
+ n, getErr := s.internalGet(nodePath)
+ if getErr != nil && getErr.ErrorCode != v2error.EcodeKeyNotFound {
+ err = getErr
+ return nil, err
+ }
+
+ if expireOpts.Refresh {
+ if getErr != nil {
+ err = getErr
+ return nil, err
+ }
+ value = n.Value
+ }
+
+ // Set new value
+ e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set)
+ if err != nil {
+ return nil, err
+ }
+ e.EtcdIndex = s.CurrentIndex
+
+ // Put prevNode into event
+ if getErr == nil {
+ prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+ prev.NodeExtern.loadInternalNode(n, false, false, s.clock)
+ e.PrevNode = prev.NodeExtern
+ }
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ return e, nil
+}
+
+// returns user-readable cause of failed comparison
+func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string {
+ switch which {
+ case CompareIndexNotMatch:
+ return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex)
+ case CompareValueNotMatch:
+ return fmt.Sprintf("[%v != %v]", prevValue, n.Value)
+ default:
+ return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex)
+ }
+}
+
+func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
+ value string, expireOpts TTLOptionSet) (*Event, error,
+) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CompareAndSwapSuccess)
+ return
+ }
+
+ s.Stats.Inc(CompareAndSwapFail)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ n, err := s.internalGet(nodePath)
+ if err != nil {
+ return nil, err
+ }
+ if n.IsDir() { // can only compare and swap file
+ err = v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex)
+ return nil, err
+ }
+
+ // If both of the prevValue and prevIndex are given, we will test both of them.
+ // Command will be executed, only if both of the tests are successful.
+ if ok, which := n.Compare(prevValue, prevIndex); !ok {
+ cause := getCompareFailCause(n, which, prevValue, prevIndex)
+ err = v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex)
+ return nil, err
+ }
+
+ if expireOpts.Refresh {
+ value = n.Value
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.NodeExtern
+
+ // if test succeed, write the value
+ if err := n.Write(value, s.CurrentIndex); err != nil {
+ return nil, err
+ }
+ n.UpdateTTL(expireOpts.ExpireTime)
+
+ // copy the value for safety
+ valueCopy := value
+ eNode.Value = &valueCopy
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) // expiration time and remaining TTL
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ return e, nil
+}
+
+// Delete removes the node and everything under it, e.g. Delete("/0/members/8e9e05c52164694d", true, true)
+func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(DeleteSuccess)
+ return
+ }
+
+ s.Stats.Inc(DeleteFail)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ // recursive implies dir
+ if recursive {
+ dir = true
+ }
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // return an error if the node does not exist
+ return nil, err
+ }
+
+ nextIndex := s.CurrentIndex + 1
+ e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex)
+ e.EtcdIndex = nextIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.NodeExtern
+
+ if n.IsDir() {
+ eNode.Dir = true
+ }
+ callback := func(path string) {
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ err = n.Remove(dir, recursive, callback)
+ if err != nil {
+ return nil, err
+ }
+ s.CurrentIndex++
+ s.WatcherHub.notify(e) // notify the watchers
+
+ return e, nil
+}
+
+func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CompareAndDeleteSuccess)
+ return
+ }
+
+ s.Stats.Inc(CompareAndDeleteFail)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+ if n.IsDir() { // can only compare and delete file
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex)
+ }
+
+ // If both of the prevValue and prevIndex are given, we will test both of them.
+ // Command will be executed, only if both of the tests are successful.
+ if ok, which := n.Compare(prevValue, prevIndex); !ok {
+ cause := getCompareFailCause(n, which, prevValue, prevIndex)
+ return nil, v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex)
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ err = n.Remove(false, false, callback)
+ if err != nil {
+ return nil, err
+ }
+
+ s.WatcherHub.notify(e)
+
+ return e, nil
+}
+
+func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+
+ key = path.Clean(path.Join("/", key))
+ if sinceIndex == 0 {
+ sinceIndex = s.CurrentIndex + 1
+ }
+ // WatcherHub does not know about the current index, so we need to pass it in
+ w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
+
+// walk walks through the nodePath and applies walkFunc to each directory along the way
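+// For example, walking "/0/members" calls walkFunc(root, "0") and then
+// walkFunc(<node for /0>, "members"), returning the final node.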
+func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *v2error.Error)) (*node, *v2error.Error) {
+ components := strings.Split(nodePath, "/")
+
+ curr := s.Root
+ var err *v2error.Error
+
+ for i := 1; i < len(components); i++ {
+ if len(components[i]) == 0 { // ignore empty path components
+ return curr, nil
+ }
+
+ curr, err = walkFunc(curr, components[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return curr, nil
+}
+
+// Update updates the value/ttl of the node.
+// If the node is a file, the value and the ttl can be updated.
+// If the node is a directory, only the ttl can be updated.
+func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(UpdateSuccess)
+ return
+ }
+
+ s.Stats.Inc(UpdateFail)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+ if n.IsDir() && len(newValue) != 0 {
+ // if the node is a directory, we cannot update value to non-empty
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex)
+ }
+
+ if expireOpts.Refresh {
+ newValue = n.Value
+ }
+
+ e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex)
+ e.EtcdIndex = nextIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.NodeExtern
+
+ if err := n.Write(newValue, nextIndex); err != nil {
+ return nil, fmt.Errorf("nodePath %v : %v", nodePath, err)
+ }
+
+ if n.IsDir() {
+ eNode.Dir = true
+ } else {
+ // copy the value for safety
+ newValueCopy := newValue
+ eNode.Value = &newValueCopy
+ }
+
+ // update ttl
+ n.UpdateTTL(expireOpts.ExpireTime)
+
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) // expiration time and remaining TTL
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ s.CurrentIndex = nextIndex
+
+ return e, nil
+}
+
+// DeleteExpiredKeys will delete all expired keys
+func (s *store) DeleteExpiredKeys(cutoff time.Time) {
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ for {
+ node := s.ttlKeyHeap.top()
+ if node == nil || node.ExpireTime.After(cutoff) {
+ break
+ }
+
+ s.CurrentIndex++
+ e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = node.Repr(false, false, s.clock)
+ if node.IsDir() {
+ e.NodeExtern.Dir = true
+ }
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ s.ttlKeyHeap.pop()
+ node.Remove(true, true, callback)
+
+ s.Stats.Inc(ExpireCount)
+
+ s.WatcherHub.notify(e)
+ }
+}
+
+// Save saves the static state of the store system.
+// It will not be able to save the state of watchers.
+// It will not save the parent field of the node. Or there will
+// be cyclic dependencies issue for the json package.
+func (s *store) Save() ([]byte, error) {
+ b, err := json.Marshal(s.Clone())
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func (s *store) SaveNoCopy() ([]byte, error) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func (s *store) Clone() Store {
+ s.worldLock.RLock()
+
+ clonedStore := newStore()
+ clonedStore.CurrentIndex = s.CurrentIndex
+ clonedStore.Root = s.Root.Clone()
+ clonedStore.WatcherHub = s.WatcherHub.clone()
+ clonedStore.Stats = s.Stats.clone()
+ clonedStore.CurrentVersion = s.CurrentVersion
+
+ s.worldLock.RUnlock()
+ return clonedStore
+}
+
+// Recovery recovers the store system from a static state
+// It needs to recover the parent field of the nodes.
+// It needs to delete the expired nodes since the saved time and also
+// needs to create monitoring goroutines.
+func (s *store) Recovery(state []byte) error {
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+ err := json.Unmarshal(state, s)
+ if err != nil {
+ return err
+ }
+
+ s.ttlKeyHeap = newTtlKeyHeap()
+
+ s.Root.recoverAndclean()
+ return nil
+}
+
+func (s *store) JsonStats() []byte {
+ s.Stats.Watchers = uint64(s.WatcherHub.count)
+ return s.Stats.toJson()
+}
+
+func (s *store) HasTTLKeys() bool {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+ return s.ttlKeyHeap.Len() != 0
+}
+
+// ------------------------------------------ OVER --------------------------------------------------------
+
+// checkDir checks whether the directory exists and creates it if it does not
+func (s *store) checkDir(parent *node, dirName string) (*node, *v2error.Error) {
+ node, ok := parent.Children[dirName]
+ if ok {
+ if node.IsDir() {
+ return node, nil
+ }
+ return nil, v2error.NewError(v2error.EcodeNotDir, node.Path, s.CurrentIndex)
+ }
+ n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent)
+ parent.Children[dirName] = n
+ return n, nil
+}
+
+// internalCreate creates a node, e.g. /0/members/8e9e05c52164694d/raftAttributes
+func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool, expireTime time.Time, action string) (*Event, *v2error.Error) {
+ currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+ if unique { // append a unique, zero-padded item (the next index) under the node path
+ nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10))
+ }
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", currIndex)
+ }
+
+ if expireTime.Before(minExpireTime) {
+ expireTime = Permanent
+ }
+
+ dirName, nodeName := path.Split(nodePath)
+ d, err := s.walk(dirName, s.checkDir) // check the node's directory path, creating missing directories
+ if err != nil {
+ s.Stats.Inc(SetFail)
+ err.Index = currIndex
+ return nil, err
+ }
+ // create, /0/members/8e9e05c52164694d/raftAttributes, 1, 1
+ e := newEvent(action, nodePath, nextIndex, nextIndex)
+ eNode := e.NodeExtern
+
+ n, _ := d.GetChild(nodeName)
+
+ if n != nil {
+ if replace {
+ if n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex)
+ }
+ e.PrevNode = n.Repr(false, false, s.clock)
+
+ if err := n.Remove(false, false, nil); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, currIndex)
+ }
+ }
+
+ if !dir {
+ valueCopy := value
+ eNode.Value = &valueCopy
+ // create a new tree node as a leaf node
+ n = newKV(s, nodePath, value, nextIndex, d, expireTime)
+ } else {
+ eNode.Dir = true
+ n = newDir(s, nodePath, nextIndex, d, expireTime)
+ }
+
+ if err := d.Add(n); err != nil { // add to the parent node, i.e. hang it on the children map
+ return nil, err
+ }
+
+ if !n.IsPermanent() { // the node has an expiration time
+ s.ttlKeyHeap.push(n)
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock) // expiration time and remaining TTL
+ }
+
+ s.CurrentIndex = nextIndex
+
+ return e, nil
+}
+
+// Version retrieves the current version of the store.
+func (s *store) Version() int {
+ return s.CurrentVersion
+}
+
+// Index retrieves the current index of the store.
+func (s *store) Index() uint64 {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+ return s.CurrentIndex
+}
+
+// Get returns a get event. If recursive is true, it returns all the content under the node path. If sorted is true, the content is sorted by key.
+func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) {
+ // /0/members
+ var err *v2error.Error
+
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(GetSuccess)
+ return
+ }
+
+ s.Stats.Inc(GetFail)
+ }()
+
+ n, err := s.internalGet(nodePath) // fails if the node, e.g. /0/members, does not exist
+ if err != nil {
+ return nil, err
+ }
+
+ e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex // assign the current etcd index to the event
+ e.NodeExtern.loadInternalNode(n, recursive, sorted, s.clock) // load the node, mainly copying its data into the event
+
+ return e, nil
+}
+
+// Create creates the node at nodePath, creating intermediate directories without a TTL as needed. It fails if the node already exists or if any node on the path is a file.
+func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CreateSuccess)
+ return
+ }
+
+ s.Stats.Inc(CreateFail)
+ }()
+
+ // create an in-memory node, push it onto ttlKeyHeap if it has a TTL, and return a create event
+ e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create)
+ if err != nil {
+ return nil, err
+ }
+
+ e.EtcdIndex = s.CurrentIndex
+ s.WatcherHub.notify(e) // notify the watchers
+
+ return e, nil
+}
+
+// internalGet gets the node at the given nodePath.
+func (s *store) internalGet(nodePath string) (*node, *v2error.Error) {
+ nodePath = path.Clean(path.Join("/", nodePath)) // /0/members
+
+ walkFunc := func(parent *node, name string) (*node, *v2error.Error) {
+ if !parent.IsDir() {
+ err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex)
+ return nil, err
+ }
+
+ child, ok := parent.Children[name]
+ if ok {
+ return child, nil
+ }
+
+ return nil, v2error.NewError(v2error.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex)
+ }
+
+ n, err := s.walk(nodePath, walkFunc)
+ if err != nil {
+ return nil, err
+ }
+ return n, nil
+}
diff --git a/server/etcdserver/api/v2store/ttl_key_heap.go b/etcd/etcdserver/api/v2store/ttl_key_heap.go
similarity index 96%
rename from server/etcdserver/api/v2store/ttl_key_heap.go
rename to etcd/etcdserver/api/v2store/ttl_key_heap.go
index 477d2b9f3aa..f5fb5013a52 100644
--- a/server/etcdserver/api/v2store/ttl_key_heap.go
+++ b/etcd/etcdserver/api/v2store/ttl_key_heap.go
@@ -16,7 +16,7 @@ package v2store
import "container/heap"
-// An TTLKeyHeap is a min-heap of TTLKeys order by expiration time
+// ttlKeyHeap is a min-heap of nodes ordered by expiration time
type ttlKeyHeap struct {
array []*node
keyMap map[*node]int
@@ -77,6 +77,7 @@ func (h *ttlKeyHeap) pop() *node {
return n
}
+// push adds a node that has an expiration time to the heap
func (h *ttlKeyHeap) push(x interface{}) {
heap.Push(h, x)
}
diff --git a/etcd/etcdserver/api/v2store/watcher_hub.go b/etcd/etcdserver/api/v2store/watcher_hub.go
new file mode 100644
index 00000000000..21aac559852
--- /dev/null
+++ b/etcd/etcdserver/api/v2store/watcher_hub.go
@@ -0,0 +1,171 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "container/list"
+ "path"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+)
+
+// A watcherHub contains all subscribed watchers; watchers is a map keyed by the watched path, with a list of watchers as the value.
+// EventHistory keeps the old events for the watcherHub.
+// It is used to help watchers obtain a continuous event history: a watcher could otherwise miss events that occur between the end of its first watch command and the start of the next one.
+type watcherHub struct {
+ count int64 // current number of watchers
+ mutex sync.Mutex //
+ watchers map[string]*list.List // the watchers registered under each path
+ EventHistory *EventHistory // history of recent events
+}
+
+// newWatchHub creates a watcherHub. The capacity determines how many events are kept in the EventHistory.
+func newWatchHub(capacity int) *watcherHub {
+ return &watcherHub{
+ watchers: make(map[string]*list.List),
+ EventHistory: newEventHistory(capacity),
+ }
+}
+
+// watch registers a watcher on key and returns it.
+func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) {
+ event, err := wh.EventHistory.scan(key, recursive, index)
+ if err != nil {
+ err.Index = storeIndex
+ return nil, err
+ }
+
+ w := &watcher{
+ eventChan: make(chan *Event, 100), // use a buffered channel
+ recursive: recursive,
+ stream: stream,
+ sinceIndex: index,
+ startIndex: storeIndex,
+ hub: wh,
+ }
+
+ wh.mutex.Lock()
+ defer wh.mutex.Unlock()
+ // If the event exists in the known history, append the EtcdIndex and return immediately
+ if event != nil {
+ ne := event.Clone()
+ ne.EtcdIndex = storeIndex
+ w.eventChan <- ne
+ return w, nil
+ }
+
+ l, ok := wh.watchers[key]
+
+ var elem *list.Element
+
+ if ok { // add the new watcher to the back of the list
+ elem = l.PushBack(w)
+ } else { // create a new list and add the new watcher
+ l = list.New()
+ elem = l.PushBack(w)
+ wh.watchers[key] = l
+ }
+
+ w.remove = func() {
+ if w.removed { // avoid removing it twice
+ return
+ }
+ w.removed = true
+ l.Remove(elem)
+ atomic.AddInt64(&wh.count, -1)
+ if l.Len() == 0 {
+ delete(wh.watchers, key)
+ }
+ }
+
+ atomic.AddInt64(&wh.count, 1)
+ return w, nil
+}
+
+func (wh *watcherHub) add(e *Event) {
+ wh.EventHistory.addEvent(e)
+}
+
+// notify takes an event and notifies the interested watchers
+func (wh *watcherHub) notify(e *Event) {
+ e = wh.EventHistory.addEvent(e)
+ segments := strings.Split(e.NodeExtern.Key, "/") // /0/members/8e9e05c52164694d/raftAttributes
+ currPath := "/"
+ // if the path is "/foo/bar", --> "/","/foo", "/foo/bar"
+ for _, segment := range segments {
+ currPath = path.Join(currPath, segment)
+ // notify the watchers interested in changes at the current path
+ // e.NodeExtern.Key /0/members/8e9e05c52164694d/raftAttributes
+ // nodePath : /
+ // nodePath : /0
+ // nodePath : /0/members
+ // nodePath : /0/members/8e9e05c52164694d
+ // nodePath : /0/members/8e9e05c52164694d/raftAttributes
+ wh.notifyWatchers(e, currPath, false)
+ }
+}
+
+// ok
+func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) {
+ wh.mutex.Lock()
+ defer wh.mutex.Unlock()
+
+ l, ok := wh.watchers[nodePath]
+ if ok {
+ curr := l.Front()
+ // e.NodeExtern.Key /0/members/8e9e05c52164694d/raftAttributes
+ // nodePath : /0/members/8e9e05c52164694d/raftAttributes
+ for curr != nil {
+ next := curr.Next()
+ w, _ := curr.Value.(*watcher)
+ originalPath := e.NodeExtern.Key == nodePath
+ // the event either originated at this path, or the path is not a hidden node
+ if (originalPath || !isHidden(nodePath, e.NodeExtern.Key)) && w.notify(e, originalPath, deleted) {
+ if !w.stream {
+ // if it is not a stream watcher, remove it and decrement the count
+ w.removed = true
+ l.Remove(curr)
+ atomic.AddInt64(&wh.count, -1)
+ }
+ }
+ curr = next
+ }
+
+ if l.Len() == 0 {
+ // remove the list once it is empty after notification
+ delete(wh.watchers, nodePath)
+ }
+ }
+}
+
+func (wh *watcherHub) clone() *watcherHub {
+ clonedHistory := wh.EventHistory.clone()
+
+ return &watcherHub{
+ EventHistory: clonedHistory,
+ }
+}
+
+// isHidden checks whether the key path is considered hidden relative to the watch path, i.e. the last element is hidden or it lives under a hidden directory.
+func isHidden(watchPath, keyPath string) bool {
+ if len(watchPath) > len(keyPath) {
+ return false
+ }
+ afterPath := path.Clean("/" + keyPath[len(watchPath):]) // the remaining path after the watch path
+ return strings.Contains(afterPath, "/_")
+}
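+
+// Illustrative examples (not part of the original source): isHidden("/foo", "/foo/_bar")
+// returns true because the remaining path "/_bar" starts a hidden segment, while
+// isHidden("/foo", "/foo/bar") returns false.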
diff --git a/etcd/etcdserver/api/v2v3/cluster.go b/etcd/etcdserver/api/v2v3/cluster.go
new file mode 100644
index 00000000000..acb3d0fedf0
--- /dev/null
+++ b/etcd/etcdserver/api/v2v3/cluster.go
@@ -0,0 +1,31 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+func (s *v2v3Server) ID() types.ID {
+ // TODO: use an actual member ID
+ return types.ID(0xe7cd2f00d)
+}
+func (s *v2v3Server) ClientURLs() []string { panic("STUB") }
+func (s *v2v3Server) Members() []*membership.Member { panic("STUB") }
+func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") }
+func (s *v2v3Server) Version() *semver.Version { panic("STUB") }
diff --git a/etcd/etcdserver/api/v2v3/doc.go b/etcd/etcdserver/api/v2v3/doc.go
new file mode 100644
index 00000000000..2ff372f1876
--- /dev/null
+++ b/etcd/etcdserver/api/v2v3/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client.
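+//
+// A minimal sketch of wiring it up (illustrative, not from the original source;
+// it assumes an already connected clientv3.Client named cli and a zap logger lg):
+//
+//	srv := v2v3.NewServer(lg, cli, "/v2")
+//	store := v2v3.NewStore(cli, "/v2")
+//	_, _ = srv, store // register these with the v2 HTTP handlers as needed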
+package v2v3
diff --git a/etcd/etcdserver/api/v2v3/server.go b/etcd/etcdserver/api/v2v3/server.go
new file mode 100644
index 00000000000..48a2ad8a9be
--- /dev/null
+++ b/etcd/etcdserver/api/v2v3/server.go
@@ -0,0 +1,131 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+type fakeStats struct{}
+
+func (s *fakeStats) SelfStats() []byte { return nil }
+func (s *fakeStats) LeaderStats() []byte { return nil }
+func (s *fakeStats) StoreStats() []byte { return nil }
+
+type v2v3Server struct {
+ lg *zap.Logger
+ c *clientv3.Client
+ store *v2v3Store
+ fakeStats
+}
+
+func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer {
+ return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)}
+}
+
+func (s *v2v3Server) ClientCertAuthEnabled() bool { return false }
+
+func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") }
+func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") }
+
+func (s *v2v3Server) Leader() types.ID {
+ ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
+ defer cancel()
+ resp, err := s.c.Status(ctx, s.c.Endpoints()[0])
+ if err != nil {
+ return 0
+ }
+ return types.ID(resp.Leader)
+}
+
+func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ // adding member as learner is not supported by V2 Server.
+ resp, err := s.c.MemberAdd(ctx, memb.PeerURLs)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ resp, err := s.c.MemberRemove(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ resp, err := s.c.MemberPromote(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
+ resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member {
+ membs := make([]*membership.Member, len(v3membs))
+ for i, m := range v3membs {
+ membs[i] = &membership.Member{
+ ID: types.ID(m.ID),
+ RaftAttributes: membership.RaftAttributes{
+ PeerURLs: m.PeerURLs,
+ IsLearner: m.IsLearner,
+ },
+ Attributes: membership.Attributes{
+ Name: m.Name,
+ ClientURLs: m.ClientURLs,
+ },
+ }
+ }
+ return membs
+}
+
+func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() }
+func (s *v2v3Server) Cluster() api.Cluster { return s }
+func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil }
+func (s *v2v3Server) LeaderChangedNotify() <-chan struct{} { return nil }
+
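+// Do serves a v2 request against the v3-backed store by reusing the server's v2
+// applier and request handler.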
+func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
+ applier := etcdserver.NewApplierV2(s.lg, s.store, nil)
+ reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier)
+ req := (*etcdserver.RequestV2)(&r)
+ resp, err := req.Handle(ctx, reqHandler)
+ if resp.Err != nil {
+ return resp, resp.Err
+ }
+ return resp, err
+}
diff --git a/etcd/etcdserver/api/v2v3/store.go b/etcd/etcdserver/api/v2v3/store.go
new file mode 100644
index 00000000000..bff3162a532
--- /dev/null
+++ b/etcd/etcdserver/api/v2v3/store.go
@@ -0,0 +1,621 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "context"
+ "fmt"
+ "path"
+ "sort"
+ "strings"
+ "time"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+)
+
+// v2v3Store implements the v2 store interface on top of the v3 client.
+type v2v3Store struct {
+ c *clientv3.Client
+ // pfx is the v3 prefix under which the keys are stored.
+ pfx string
+ ctx context.Context
+}
+
+const maxPathDepth = 63
+
+var errUnsupported = fmt.Errorf("TTLs are unsupported")
+
+func NewStore(c *clientv3.Client, pfx string) v2store.Store { return newStore(c, pfx) }
+
+func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} }
+
+func (s *v2v3Store) Index() uint64 { panic("STUB") }
+
+func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*v2store.Event, error) {
+ key := s.mkPath(nodePath)
+ resp, err := s.c.Txn(s.ctx).Then(
+ clientv3.OpGet(key+"/"),
+ clientv3.OpGet(key),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+
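+ // Responses[0] queried key+"/" (the directory form) and Responses[1] the plain
+ // key; whichever exists decides whether this is a directory or a file get.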
+ if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) {
+ nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision)
+ if err != nil {
+ return nil, err
+ }
+ cidx, midx := uint64(0), uint64(0)
+ if len(kvs) > 0 {
+ cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision)
+ }
+ return &v2store.Event{
+ Action: v2store.Get,
+ NodeExtern: &v2store.NodeExtern{
+ Key: nodePath,
+ Dir: true,
+ ExternNodes: nodes,
+ CreatedIndex: cidx,
+ ModifiedIndex: midx,
+ },
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+ }
+
+ kvs := resp.Responses[1].GetResponseRange().Kvs
+ if len(kvs) == 0 {
+ return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ return &v2store.Event{
+ Action: v2store.Get,
+ NodeExtern: s.mkV2Node(kvs[0]),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*v2store.NodeExtern, error) {
+ rootNodes, err := s.getDirDepth(nodePath, 1, rev)
+ if err != nil || !recursive {
+ if sorted {
+ sort.Sort(v2store.NodeExterns(rootNodes))
+ }
+ return rootNodes, err
+ }
+ nextNodes := rootNodes
+ nodes := make(map[string]*v2store.NodeExtern)
+ // Breadth walk the subdirectories
+ for i := 2; len(nextNodes) > 0; i++ {
+ for _, n := range nextNodes {
+ nodes[n.Key] = n
+ if parent := nodes[path.Dir(n.Key)]; parent != nil {
+ parent.ExternNodes = append(parent.ExternNodes, n)
+ }
+ }
+ if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil {
+ return nil, err
+ }
+ }
+
+ if sorted {
+ sort.Sort(v2store.NodeExterns(rootNodes))
+ }
+ return rootNodes, nil
+}
+
+func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*v2store.NodeExtern, error) {
+ pd := s.mkPathDepth(nodePath, depth)
+ resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev))
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]*v2store.NodeExtern, len(resp.Kvs))
+ for i, kv := range resp.Kvs {
+ nodes[i] = s.mkV2Node(kv)
+ }
+ return nodes, nil
+}
+
+func (s *v2v3Store) Set(nodePath string, dir bool, value string, expireOpts v2store.TTLOptionSet,
+) (*v2store.Event, error) {
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+
+ ecode := 0
+ applyf := func(stm concurrency.STM) error {
+ // build path if any directories in path do not exist
+ dirs := []string{}
+ for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
+ pp := s.mkPath(p)
+ if stm.Rev(pp) > 0 {
+ ecode = v2error.EcodeNotDir
+ return nil
+ }
+ if stm.Rev(pp+"/") == 0 {
+ dirs = append(dirs, pp+"/")
+ }
+ }
+ for _, d := range dirs {
+ stm.Put(d, "")
+ }
+
+ key := s.mkPath(nodePath)
+ if dir {
+ if stm.Rev(key) != 0 {
+ // exists as non-dir
+ ecode = v2error.EcodeNotDir
+ return nil
+ }
+ key = key + "/"
+ } else if stm.Rev(key+"/") != 0 {
+ ecode = v2error.EcodeNotFile
+ return nil
+ }
+ stm.Put(key, value, clientv3.WithPrevKV())
+ stm.Put(s.mkActionKey(), v2store.Set)
+ return nil
+ }
+
+ resp, err := s.newSTM(applyf)
+ if err != nil {
+ return nil, err
+ }
+ if ecode != 0 {
+ return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ createRev := resp.Header.Revision
+ var pn *v2store.NodeExtern
+ if pkv := prevKeyFromPuts(resp); pkv != nil {
+ pn = s.mkV2Node(pkv)
+ createRev = pkv.CreateRevision
+ }
+
+ vp := &value
+ if dir {
+ vp = nil
+ }
+ return &v2store.Event{
+ Action: v2store.Set,
+ NodeExtern: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: vp,
+ Dir: dir,
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ CreatedIndex: mkV2Rev(createRev),
+ },
+ PrevNode: pn,
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+// Update updates a node's attributes, e.g. flipping IsLearner to false.
+func (s *v2v3Store) Update(nodePath, newValue string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+
+ key := s.mkPath(nodePath)
+ ecode := 0
+ applyf := func(stm concurrency.STM) error {
+ if rev := stm.Rev(key + "/"); rev != 0 {
+ ecode = v2error.EcodeNotFile
+ return nil
+ }
+ if rev := stm.Rev(key); rev == 0 {
+ ecode = v2error.EcodeKeyNotFound
+ return nil
+ }
+ stm.Put(key, newValue, clientv3.WithPrevKV())
+ stm.Put(s.mkActionKey(), v2store.Update)
+ return nil
+ }
+
+ resp, err := s.newSTM(applyf)
+ if err != nil {
+ return nil, err
+ }
+ if ecode != 0 {
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ pkv := prevKeyFromPuts(resp)
+ return &v2store.Event{
+ Action: v2store.Update,
+ NodeExtern: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: &newValue,
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) Create(nodePath string, dir bool, value string, unique bool, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+ ecode := 0
+ applyf := func(stm concurrency.STM) error {
+ ecode = 0
+ key := s.mkPath(nodePath)
+ if unique {
+ // append unique item under the node path
+ for {
+ key = nodePath + "/" + fmt.Sprintf("%020s", time.Now())
+ key = path.Clean(path.Join("/", key))
+ key = s.mkPath(key)
+ if stm.Rev(key) == 0 {
+ break
+ }
+ }
+ }
+ if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 {
+ ecode = v2error.EcodeNodeExist
+ return nil
+ }
+ // build path if any directories in path do not exist
+ dirs := []string{}
+ for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
+ pp := s.mkPath(p)
+ if stm.Rev(pp) > 0 {
+ ecode = v2error.EcodeNotDir
+ return nil
+ }
+ if stm.Rev(pp+"/") == 0 {
+ dirs = append(dirs, pp+"/")
+ }
+ }
+ for _, d := range dirs {
+ stm.Put(d, "")
+ }
+
+ if dir {
+ // directories marked with extra slash in key name
+ key += "/"
+ }
+ stm.Put(key, value)
+ stm.Put(s.mkActionKey(), v2store.Create)
+ return nil
+ }
+
+ resp, err := s.newSTM(applyf)
+ if err != nil {
+ return nil, err
+ }
+ if ecode != 0 {
+ return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ var v *string
+ if !dir {
+ v = &value
+ }
+
+ return &v2store.Event{
+ Action: v2store.Create,
+ NodeExtern: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: v,
+ Dir: dir,
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ CreatedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, value string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+
+ key := s.mkPath(nodePath)
+ resp, err := s.c.Txn(s.ctx).If(
+ s.mkCompare(nodePath, prevValue, prevIndex)...,
+ ).Then(
+ clientv3.OpPut(key, value, clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.CompareAndSwap),
+ ).Else(
+ clientv3.OpGet(key),
+ clientv3.OpGet(key+"/"),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, compareFail(nodePath, prevValue, prevIndex, resp)
+ }
+
+ pkv := resp.Responses[0].GetResponsePut().PrevKv
+ return &v2store.Event{
+ Action: v2store.CompareAndSwap,
+ NodeExtern: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: &value,
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+ if !dir && !recursive {
+ return s.deleteNode(nodePath)
+ }
+ if !recursive {
+ return s.deleteEmptyDir(nodePath)
+ }
+
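+ // Recursive delete: remove the directory marker itself, issue a prefix delete at
+ // every encoded depth level, and finally record the action key for watchers.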
+ dels := make([]clientv3.Op, maxPathDepth+1)
+ dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV())
+ for i := 1; i < maxPathDepth; i++ {
+ dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix())
+ }
+ dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), v2store.Delete)
+
+ resp, err := s.c.Txn(s.ctx).If(
+ clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0),
+ clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0),
+ ).Then(
+ dels...,
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ dresp := resp.Responses[0].GetResponseDeleteRange()
+ return &v2store.Event{
+ Action: v2store.Delete,
+ PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) deleteEmptyDir(nodePath string) (*v2store.Event, error) {
+ resp, err := s.c.Txn(s.ctx).If(
+ clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(),
+ ).Then(
+ clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.Delete),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, v2error.NewError(v2error.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ dresp := resp.Responses[0].GetResponseDeleteRange()
+ if len(dresp.PrevKvs) == 0 {
+ return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ return &v2store.Event{
+ Action: v2store.Delete,
+ PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) deleteNode(nodePath string) (*v2store.Event, error) {
+ resp, err := s.c.Txn(s.ctx).If(
+ clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0),
+ ).Then(
+ clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.Delete),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs
+ if len(pkvs) == 0 {
+ return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ pkv := pkvs[0]
+ return &v2store.Event{
+ Action: v2store.Delete,
+ NodeExtern: &v2store.NodeExtern{
+ Key: nodePath,
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+
+ key := s.mkPath(nodePath)
+ resp, err := s.c.Txn(s.ctx).If(
+ s.mkCompare(nodePath, prevValue, prevIndex)...,
+ ).Then(
+ clientv3.OpDelete(key, clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.CompareAndDelete),
+ ).Else(
+ clientv3.OpGet(key),
+ clientv3.OpGet(key+"/"),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, compareFail(nodePath, prevValue, prevIndex, resp)
+ }
+
+ // PrevKvs is non-empty since the txn only succeeds when the key exists
+ pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0]
+ return &v2store.Event{
+ Action: v2store.CompareAndDelete,
+ NodeExtern: &v2store.NodeExtern{
+ Key: nodePath,
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error {
+ if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 {
+ return v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ kvs := resp.Responses[0].GetResponseRange().Kvs
+ if len(kvs) == 0 {
+ return v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ kv := kvs[0]
+ indexMatch := prevIndex == 0 || kv.ModRevision == int64(prevIndex)
+ valueMatch := prevValue == "" || string(kv.Value) == prevValue
+ var cause string
+ switch {
+ case indexMatch && !valueMatch:
+ cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value))
+ case valueMatch && !indexMatch:
+ cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision)
+ default:
+ cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision)
+ }
+ return v2error.NewError(v2error.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision))
+}
+
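+// mkCompare builds the txn guards used by CompareAndSwap and CompareAndDelete:
+// the key must already exist and, when supplied, prevIndex and prevValue must match.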
+func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp {
+ key := s.mkPath(nodePath)
+ cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)}
+ if prevIndex != 0 {
+ cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex)))
+ }
+ if prevValue != "" {
+ cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue))
+ }
+ return cmps
+}
+
+func (s *v2v3Store) JsonStats() []byte { panic("STUB") }
+func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") }
+
+func (s *v2v3Store) Version() int { return 2 }
+
+// TODO: move this out of the Store interface?
+
+func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") }
+func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") }
+func (s *v2v3Store) Clone() v2store.Store { panic("STUB") }
+func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") }
+func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") }
+
+func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) }
+
+func (s *v2v3Store) mkNodePath(p string) string {
+ return path.Clean(p[len(s.pfx)+len("/k/000/"):])
+}
+
+// mkPathDepth makes a path to a key that encodes its directory depth
+// for fast directory listing. If a depth is provided, it is added
+// to the computed depth.
+func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string {
+ normalForm := path.Clean(path.Join("/", nodePath))
+ n := strings.Count(normalForm, "/") + depth
+ return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm)
+}
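+
+// Illustrative example (not part of the original source): with pfx "/p",
+// mkPathDepth("/foo/bar", 0) returns "/p/002/k//foo/bar"; mkNodePath strips the
+// fixed-width "/p/002/k/" prefix and Cleans the remainder back to "/foo/bar".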
+
+func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" }
+
+func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" }
+
+func mkV2Rev(v3Rev int64) uint64 {
+ if v3Rev == 0 {
+ return 0
+ }
+ return uint64(v3Rev - 1)
+}
+
+func mkV3Rev(v2Rev uint64) int64 {
+ if v2Rev == 0 {
+ return 0
+ }
+ return int64(v2Rev + 1)
+}
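+
+// The +1/-1 offsets above keep the two index spaces aligned: v3 revisions start
+// at 1 while v2 indices start at 0, and 0 maps to 0 as the "unset" value.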
+
+// mkV2Node creates a V2 NodeExtern from a V3 KeyValue
+func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern {
+ if kv == nil {
+ return nil
+ }
+ n := &v2store.NodeExtern{
+ Key: s.mkNodePath(string(kv.Key)),
+ Dir: kv.Key[len(kv.Key)-1] == '/',
+ CreatedIndex: mkV2Rev(kv.CreateRevision),
+ ModifiedIndex: mkV2Rev(kv.ModRevision),
+ }
+ if !n.Dir {
+ v := string(kv.Value)
+ n.Value = &v
+ }
+ return n
+}
+
+// prevKeyFromPuts gets the prev key that is being put; ignores
+// the put action response.
+func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue {
+ for _, r := range resp.Responses {
+ pkv := r.GetResponsePut().PrevKv
+ if pkv != nil && pkv.CreateRevision > 0 {
+ return pkv
+ }
+ }
+ return nil
+}
+
+func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) {
+ return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable))
+}
diff --git a/etcd/etcdserver/api/v2v3/watcher.go b/etcd/etcdserver/api/v2v3/watcher.go
new file mode 100644
index 00000000000..f4f25890f06
--- /dev/null
+++ b/etcd/etcdserver/api/v2v3/watcher.go
@@ -0,0 +1,143 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "context"
+ "strings"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2error"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+)
+
+func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (v2store.Watcher, error) {
+ ctx, cancel := context.WithCancel(s.ctx)
+ wch := s.c.Watch(
+ ctx,
+ // TODO: very pricey; use a single store-wide watch in future
+ s.pfx,
+ clientv3.WithPrefix(),
+ clientv3.WithRev(int64(sinceIndex)),
+ clientv3.WithCreatedNotify(),
+ clientv3.WithPrevKV())
+ resp, ok := <-wch
+ if err := resp.Err(); err != nil || !ok {
+ cancel()
+ return nil, v2error.NewError(v2error.EcodeRaftInternal, prefix, 0)
+ }
+
+ evc, donec := make(chan *v2store.Event), make(chan struct{})
+ go func() {
+ defer func() {
+ close(evc)
+ close(donec)
+ }()
+ for resp := range wch {
+ for _, ev := range s.mkV2Events(resp) {
+ k := ev.NodeExtern.Key
+ if recursive {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ // accept events on hidden keys given in prefix
+ k = strings.Replace(k, prefix, "/", 1)
+ // ignore hidden keys deeper than prefix
+ if strings.Contains(k, "/_") {
+ continue
+ }
+ }
+ if !recursive && k != prefix {
+ continue
+ }
+ select {
+ case evc <- ev:
+ case <-ctx.Done():
+ return
+ }
+ if !stream {
+ return
+ }
+ }
+ }
+ }()
+
+ return &v2v3Watcher{
+ startRev: resp.Header.Revision,
+ evc: evc,
+ donec: donec,
+ cancel: cancel,
+ }, nil
+}
+
+func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*v2store.Event) {
+ ak := s.mkActionKey()
+ for _, rev := range mkRevs(wr) {
+ var act, key *clientv3.Event
+ for _, ev := range rev {
+ if string(ev.Kv.Key) == ak {
+ act = ev
+ } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) {
+ // use longest key to ignore intermediate new
+ // directories from Create.
+ key = ev
+ } else if key == nil {
+ key = ev
+ }
+ }
+ if act != nil && act.Kv != nil && key != nil {
+ v2ev := &v2store.Event{
+ Action: string(act.Kv.Value),
+ NodeExtern: s.mkV2Node(key.Kv),
+ PrevNode: s.mkV2Node(key.PrevKv),
+ EtcdIndex: mkV2Rev(wr.Header.Revision),
+ }
+ evs = append(evs, v2ev)
+ }
+ }
+ return evs
+}
+
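+// mkRevs groups the events of a watch response by ModRevision, so each group
+// holds the keys written in a single v3 transaction (one store revision).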
+func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) {
+ var curRev []*clientv3.Event
+ for _, ev := range wr.Events {
+ if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision {
+ revs = append(revs, curRev)
+ curRev = nil
+ }
+ curRev = append(curRev, ev)
+ }
+ if curRev != nil {
+ revs = append(revs, curRev)
+ }
+ return revs
+}
+
+type v2v3Watcher struct {
+ startRev int64
+ evc chan *v2store.Event
+ donec chan struct{}
+ cancel context.CancelFunc
+}
+
+func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) }
+
+func (w *v2v3Watcher) Remove() {
+ w.cancel()
+ <-w.donec
+}
+
+func (w *v2v3Watcher) EventChan() chan *v2store.Event { return w.evc }
diff --git a/etcd/etcdserver/api/v3alarm/over_alarms.go b/etcd/etcdserver/api/v3alarm/over_alarms.go
new file mode 100644
index 00000000000..4e24214dc76
--- /dev/null
+++ b/etcd/etcdserver/api/v3alarm/over_alarms.go
@@ -0,0 +1,164 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3alarm manages health status alarms in etcd.
+package v3alarm
+
+import (
+ "sync"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+)
+
+type BackendGetter interface {
+ Backend() backend.Backend
+}
+
+type alarmSet map[types.ID]*pb.AlarmMember
+
+type AlarmStore struct {
+ lg *zap.Logger
+ mu sync.Mutex
+ types map[pb.AlarmType]alarmSet
+ // {
+ // "AlarmType_NONE": {
+ // },
+ // "AlarmType_NOSPACE": {
+ // "1": {
+ // "MemberID": "1",
+ // "AlarmType": "AlarmType_NOSPACE"
+ // }
+ // },
+ // "AlarmType_CORRUPT": {}
+ //}
+ bg BackendGetter
+}
+
+func NewAlarmStore(lg *zap.Logger, bg BackendGetter) (*AlarmStore, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), bg: bg}
+ err := ret.restore()
+ return ret, err
+}
+
+// Activate records the alarm and persists it to the backend.
+func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at}
+ if m := a.addToMap(newAlarm); m != newAlarm {
+ return m
+ }
+
+ v, err := newAlarm.Marshal()
+ if err != nil {
+ a.lg.Panic("序列化报警成员失败", zap.Error(err))
+ }
+
+ b := a.bg.Backend()
+ b.BatchTx().Lock()
+ b.BatchTx().UnsafePut(buckets.Alarm, v, nil)
+ b.BatchTx().Unlock()
+
+ return newAlarm
+}
+
+func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ t := a.types[at]
+ if t == nil {
+ t = make(alarmSet)
+ a.types[at] = t
+ }
+ m := t[id]
+ if m == nil {
+ return nil
+ }
+
+ delete(t, id)
+
+ v, err := m.Marshal()
+ if err != nil {
+ a.lg.Panic("反序列化报警成员失败", zap.Error(err))
+ }
+
+ b := a.bg.Backend()
+ b.BatchTx().Lock()
+ b.BatchTx().UnsafeDelete(buckets.Alarm, v)
+ b.BatchTx().Unlock()
+
+ return m
+}
+
+// Get returns the alarms of the given type; AlarmType_NONE returns all alarms.
+func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ if at == pb.AlarmType_NONE {
+ for _, t := range a.types {
+ for _, m := range t {
+ ret = append(ret, m)
+ }
+ }
+ return ret
+ }
+ for _, m := range a.types[at] {
+ ret = append(ret, m)
+ }
+ return ret
+}
+
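+// restore reloads alarms from the backend. Activate stores each marshaled
+// AlarmMember as the bucket key with a nil value, so it is the key that is
+// unmarshaled here.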
+func (a *AlarmStore) restore() error {
+ b := a.bg.Backend()
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(buckets.Alarm)
+ err := tx.UnsafeForEach(buckets.Alarm, func(k, v []byte) error {
+ var m pb.AlarmMember
+ if err := m.Unmarshal(k); err != nil {
+ return err
+ }
+ a.addToMap(&m)
+ return nil
+ })
+ tx.Unlock()
+
+ b.ForceCommit()
+ return err
+}
+
+func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember {
+ t := a.types[newAlarm.Alarm]
+ if t == nil {
+ t = make(alarmSet)
+ a.types[newAlarm.Alarm] = t
+ }
+ m := t[types.ID(newAlarm.MemberID)]
+ if m != nil {
+ return m
+ }
+ t[types.ID(newAlarm.MemberID)] = newAlarm
+ return newAlarm
+}
diff --git a/etcd/etcdserver/api/v3client/doc.go b/etcd/etcdserver/api/v3client/doc.go
new file mode 100644
index 00000000000..0c532019904
--- /dev/null
+++ b/etcd/etcdserver/api/v3client/doc.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3client provides clientv3 interfaces from an etcdserver.
+//
+// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
+//
+// import (
+// "github.com/ls-2018/etcd_cn/client_sdk/v3"
+// "context"
+//
+// "github.com/ls-2018/etcd_cn/etcd/embed"
+// "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3client"
+// )
+//
+// ...
+//
+// // create an embedded EtcdServer from the default configuration
+// cfg := embed.NewConfig()
+// cfg.Dir = "default.etcd"
+// e, err := embed.StartEtcd(cfg)
+// if err != nil {
+// // handle error!
+// }
+//
+// // wrap the EtcdServer with v3client
+// cli := v3client.New(e.Server)
+//
+// // use like an ordinary clientv3
+// resp, err := cli.Put(context.TODO(), "some-key", "it works!")
+// if err != nil {
+// // handle error!
+// }
+//
+package v3client
diff --git a/server/etcdserver/api/v3client/v3client.go b/etcd/etcdserver/api/v3client/v3client.go
similarity index 89%
rename from server/etcdserver/api/v3client/v3client.go
rename to etcd/etcdserver/api/v3client/v3client.go
index c44479ffad2..a67e0c6fbd5 100644
--- a/server/etcdserver/api/v3client/v3client.go
+++ b/etcd/etcdserver/api/v3client/v3client.go
@@ -18,15 +18,16 @@ import (
"context"
"time"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
- "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc"
+ "github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy/adapter"
)
// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
// of making gRPC calls through sockets, the client makes direct function calls
-// to the etcd server through its api/v3rpc function interfaces.
+// to the etcd server through its api/v3rpc function interfaces.
func New(s *etcdserver.EtcdServer) *clientv3.Client {
c := clientv3.NewCtxClient(context.Background(), clientv3.WithZapLogger(s.Logger()))
diff --git a/server/etcdserver/api/v3compactor/compactor.go b/etcd/etcdserver/api/v3compactor/compactor.go
similarity index 97%
rename from server/etcdserver/api/v3compactor/compactor.go
rename to etcd/etcdserver/api/v3compactor/compactor.go
index e352670c12b..f7f46871395 100644
--- a/server/etcdserver/api/v3compactor/compactor.go
+++ b/etcd/etcdserver/api/v3compactor/compactor.go
@@ -19,7 +19,7 @@ import (
"fmt"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
diff --git a/server/etcdserver/api/v3compactor/doc.go b/etcd/etcdserver/api/v3compactor/doc.go
similarity index 100%
rename from server/etcdserver/api/v3compactor/doc.go
rename to etcd/etcdserver/api/v3compactor/doc.go
diff --git a/server/etcdserver/api/v3compactor/periodic.go b/etcd/etcdserver/api/v3compactor/periodic.go
similarity index 88%
rename from server/etcdserver/api/v3compactor/periodic.go
rename to etcd/etcdserver/api/v3compactor/periodic.go
index 853c1a9e7f3..e87797ac197 100644
--- a/server/etcdserver/api/v3compactor/periodic.go
+++ b/etcd/etcdserver/api/v3compactor/periodic.go
@@ -19,8 +19,8 @@ import (
"sync"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
@@ -54,9 +54,8 @@ func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevG
period: h,
rg: rg,
c: c,
+ revs: make([]int64, 0),
}
- // revs won't be longer than the retentions.
- pc.revs = make([]int64, 0, pc.getRetentions())
pc.ctx, pc.cancel = context.WithCancel(context.Background())
return pc
}
@@ -67,7 +66,7 @@ Compaction period 1-hour:
2. record revisions for every 1/10 of 1-hour (6-minute)
3. keep recording revisions with no compaction for first 1-hour
4. do compact with revs[0]
- - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 1-hour (6-minute)
Compaction period 24-hour:
@@ -75,7 +74,7 @@ Compaction period 24-hour:
2. record revisions for every 1/10 of 1-hour (6-minute)
3. keep recording revisions with no compaction for first 24-hour
4. do compact with revs[0]
- - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 1-hour (6-minute)
Compaction period 59-min:
@@ -83,7 +82,7 @@ Compaction period 59-min:
2. record revisions for every 1/10 of 59-min (5.9-min)
3. keep recording revisions with no compaction for first 59-min
4. do compact with revs[0]
- - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 59-min (5.9-min)
Compaction period 5-sec:
@@ -91,7 +90,7 @@ Compaction period 5-sec:
2. record revisions for every 1/10 of 5-sec (0.5-sec)
3. keep recording revisions with no compaction for first 5-sec
4. do compact with revs[0]
- - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
- failure? update revs, and retry after 1/10 of 5-sec (0.5-sec)
*/
@@ -102,7 +101,6 @@ func (pc *Periodic) Run() {
retentions := pc.getRetentions()
go func() {
- lastRevision := int64(0)
lastSuccess := pc.clock.Now()
baseInterval := pc.period
for {
@@ -115,15 +113,15 @@ func (pc *Periodic) Run() {
case <-pc.ctx.Done():
return
case <-pc.clock.After(retryInterval):
- pc.mu.RLock()
+ pc.mu.Lock()
p := pc.paused
- pc.mu.RUnlock()
+ pc.mu.Unlock()
if p {
continue
}
}
- rev := pc.revs[0]
- if pc.clock.Now().Sub(lastSuccess) < baseInterval || rev == lastRevision {
+
+ if pc.clock.Now().Sub(lastSuccess) < baseInterval {
continue
}
@@ -131,6 +129,7 @@ func (pc *Periodic) Run() {
if baseInterval == pc.period {
baseInterval = compactInterval
}
+ rev := pc.revs[0]
pc.lg.Info(
"starting auto periodic compaction",
@@ -146,7 +145,6 @@ func (pc *Periodic) Run() {
zap.Duration("compact-period", pc.period),
zap.Duration("took", pc.clock.Now().Sub(startTime)),
)
- lastRevision = rev
lastSuccess = pc.clock.Now()
} else {
pc.lg.Warn(
diff --git a/etcd/etcdserver/api/v3compactor/revision.go b/etcd/etcdserver/api/v3compactor/revision.go
new file mode 100644
index 00000000000..9a30feb77e5
--- /dev/null
+++ b/etcd/etcdserver/api/v3compactor/revision.go
@@ -0,0 +1,130 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3compactor
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+)
+
+// Revision compacts the log by purging revisions older than
+// the configured revision number. Compaction happens every 5 minutes.
+type Revision struct {
+ lg *zap.Logger
+
+ clock clockwork.Clock
+ retention int64
+
+ rg RevGetter
+ c Compactable
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ mu sync.Mutex
+ paused bool
+}
+
+// newRevision creates a new instance of the Revision compactor that purges
+// the log older than retention revisions from the current revision.
+func newRevision(lg *zap.Logger, clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision {
+ rc := &Revision{
+ lg: lg,
+ clock: clock,
+ retention: retention,
+ rg: rg,
+ c: c,
+ }
+ rc.ctx, rc.cancel = context.WithCancel(context.Background())
+ return rc
+}
+
+const revInterval = 5 * time.Minute
+
+// Run runs revision-based compactor.
+func (rc *Revision) Run() {
+ prev := int64(0)
+ go func() {
+ for {
+ select {
+ case <-rc.ctx.Done():
+ return
+ case <-rc.clock.After(revInterval):
+ rc.mu.Lock()
+ p := rc.paused
+ rc.mu.Unlock()
+ if p {
+ continue
+ }
+ }
+
+ rev := rc.rg.Rev() - rc.retention
+ if rev <= 0 || rev == prev {
+ continue
+ }
+
+ now := time.Now()
+ rc.lg.Info(
+ "starting auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ )
+ _, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev})
+ if err == nil || err == mvcc.ErrCompacted {
+ prev = rev
+ rc.lg.Info(
+ "completed auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ zap.Duration("took", time.Since(now)),
+ )
+ } else {
+ rc.lg.Warn(
+ "failed auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ zap.Duration("retry-interval", revInterval),
+ zap.Error(err),
+ )
+ }
+ }
+ }()
+}
+
+// Stop stops revision-based compactor.
+func (rc *Revision) Stop() {
+ rc.cancel()
+}
+
+// Pause pauses revision-based compactor.
+func (rc *Revision) Pause() {
+ rc.mu.Lock()
+ rc.paused = true
+ rc.mu.Unlock()
+}
+
+// Resume resumes revision-based compactor.
+func (rc *Revision) Resume() {
+ rc.mu.Lock()
+ rc.paused = false
+ rc.mu.Unlock()
+}
diff --git a/server/etcdserver/api/v3election/doc.go b/etcd/etcdserver/api/v3election/doc.go
similarity index 100%
rename from server/etcdserver/api/v3election/doc.go
rename to etcd/etcdserver/api/v3election/doc.go
diff --git a/etcd/etcdserver/api/v3election/election.go b/etcd/etcdserver/api/v3election/election.go
new file mode 100644
index 00000000000..f21a8efc213
--- /dev/null
+++ b/etcd/etcdserver/api/v3election/election.go
@@ -0,0 +1,137 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3election
+
+import (
+ "context"
+ "errors"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ epb "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb"
+)
+
+// ErrMissingLeaderKey is returned when an election API request
+// is missing the "leader" field.
+var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`)
+
+type electionServer struct {
+ c *clientv3.Client
+}
+
+// NewElectionServer creates the election server.
+func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
+ return &electionServer{c}
+}
+
+func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
+ s, err := es.session(ctx, req.Lease)
+ if err != nil {
+ return nil, err
+ }
+ // the Session created above is used for the election; its TTL can be set according to actual needs
+ e := concurrency.NewElection(s, string(req.Name))
+ if err = e.Campaign(ctx, string(req.Value)); err != nil {
+ return nil, err
+ }
+ return &epb.CampaignResponse{
+ Header: e.Header(),
+ Leader: &epb.LeaderKey{
+ Name: req.Name,
+ Key: []byte(e.Key()),
+ Rev: e.Rev(),
+ Lease: int64(s.Lease()),
+ },
+ }, nil
+}
+
+func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
+ if req.Leader == nil {
+ return nil, ErrMissingLeaderKey
+ }
+ s, err := es.session(ctx, req.Leader.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+ if err := e.Proclaim(ctx, string(req.Value)); err != nil {
+ return nil, err
+ }
+ return &epb.ProclaimResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
+ s, err := es.session(stream.Context(), -1)
+ if err != nil {
+ return err
+ }
+ e := concurrency.NewElection(s, string(req.Name))
+ ch := e.Observe(stream.Context())
+ for stream.Context().Err() == nil {
+ select {
+ case <-stream.Context().Done():
+ case resp, ok := <-ch:
+ if !ok {
+ return nil
+ }
+ lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
+ if err := stream.Send(lresp); err != nil {
+ return err
+ }
+ }
+ }
+ return stream.Context().Err()
+}
+
+func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
+ s, err := es.session(ctx, -1)
+ if err != nil {
+ return nil, err
+ }
+ l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
+ if lerr != nil {
+ return nil, lerr
+ }
+ return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
+}
+
+func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
+ if req.Leader == nil {
+ return nil, ErrMissingLeaderKey
+ }
+ s, err := es.session(ctx, req.Leader.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+ if err := e.Resign(ctx); err != nil {
+ return nil, err
+ }
+ return &epb.ResignResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
+ s, err := concurrency.NewSession(
+ es.c,
+ concurrency.WithLease(clientv3.LeaseID(lease)),
+ concurrency.WithContext(ctx),
+ )
+ if err != nil {
+ return nil, err
+ }
+ s.Orphan()
+ return s, nil
+}
diff --git a/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
similarity index 92%
rename from server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
rename to etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
index 432fb9c4477..28d383e42bd 100644
--- a/server/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
+++ b/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
@@ -1,5 +1,5 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: server/etcdserver/api/v3election/v3electionpb/v3election.proto
+// Code generated by protoc-gen-grpc-gateway.
+// source: etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
/*
Package v3electionpb is a reverse proxy.
@@ -10,10 +10,11 @@ package gw
import (
"context"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
"io"
"net/http"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb"
+
"github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
@@ -21,18 +22,19 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// Suppress "imported and not used" errors
var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-var _ = descriptor.ForMessage
-var _ = metadata.Join
+
+var (
+ _ io.Reader
+ _ status.Status
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = descriptor.ForMessage
+)
func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3electionpb.CampaignRequest
@@ -48,7 +50,6 @@ func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshale
msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
-
}
func local_request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -65,7 +66,6 @@ func local_request_Election_Campaign_0(ctx context.Context, marshaler runtime.Ma
msg, err := server.Campaign(ctx, &protoReq)
return msg, metadata, err
-
}
func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -82,7 +82,6 @@ func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshale
msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
-
}
func local_request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -99,7 +98,6 @@ func local_request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Ma
msg, err := server.Proclaim(ctx, &protoReq)
return msg, metadata, err
-
}
func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -116,7 +114,6 @@ func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler,
msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
-
}
func local_request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -133,7 +130,6 @@ func local_request_Election_Leader_0(ctx context.Context, marshaler runtime.Mars
msg, err := server.Leader(ctx, &protoReq)
return msg, metadata, err
-
}
func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
@@ -158,7 +154,6 @@ func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler
}
metadata.HeaderMD = header
return stream, metadata, nil
-
}
func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -175,7 +170,6 @@ func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler,
msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
-
}
func local_request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -192,20 +186,15 @@ func local_request_Election_Resign_0(ctx context.Context, marshaler runtime.Mars
msg, err := server.Resign(ctx, &protoReq)
return msg, metadata, err
-
}
// v3electionpb.RegisterElectionHandlerServer registers the http handlers for service Election to "mux".
// UnaryRPC :call v3electionpb.ElectionServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
-// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterElectionHandlerFromEndpoint instead.
func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3electionpb.ElectionServer) error {
-
mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
- var stream runtime.ServerTransportStream
- ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
@@ -213,7 +202,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
return
}
resp, md, err := local_request_Election_Campaign_0(rctx, inboundMarshaler, server, req, pathParams)
- md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@@ -221,14 +209,11 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
}
forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
- var stream runtime.ServerTransportStream
- ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
@@ -236,7 +221,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
return
}
resp, md, err := local_request_Election_Proclaim_0(rctx, inboundMarshaler, server, req, pathParams)
- md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@@ -244,14 +228,11 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
}
forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
- var stream runtime.ServerTransportStream
- ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
@@ -259,7 +240,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
return
}
resp, md, err := local_request_Election_Leader_0(rctx, inboundMarshaler, server, req, pathParams)
- md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@@ -267,7 +247,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
}
forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -280,8 +259,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
- var stream runtime.ServerTransportStream
- ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
@@ -289,7 +266,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
return
}
resp, md, err := local_request_Election_Resign_0(rctx, inboundMarshaler, server, req, pathParams)
- md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@@ -297,7 +273,6 @@ func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, s
}
forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
return nil
@@ -340,7 +315,6 @@ func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *g
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ElectionClient" to call the correct interceptors.
func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
-
mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -358,7 +332,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c
}
forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -378,7 +351,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c
}
forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -398,7 +370,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c
}
forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -418,7 +389,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c
}
forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -438,7 +408,6 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c
}
forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
return nil
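
For orientation, the gateway registration functions this hunk keeps (RegisterElectionHandler and the handler-server/handler-client variants above) are normally wired onto an HTTP mux roughly as in the sketch below. This is only an illustration: the gw import path, the listen and dial addresses, and the dial options are assumptions, not part of the change itself.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	gw "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb/gw" // assumed package path
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Dial the gRPC endpoint that serves the Election service.
	conn, err := grpc.DialContext(ctx, "127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// RegisterElectionHandler (shown in the hunk above) forwards JSON/HTTP
	// requests received by the mux to the Election gRPC service over conn.
	mux := runtime.NewServeMux()
	if err := gw.RegisterElectionHandler(ctx, mux, conn); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
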
diff --git a/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go
new file mode 100644
index 00000000000..3947ffe4224
--- /dev/null
+++ b/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go
@@ -0,0 +1,748 @@
+package v3electionpb
+
+// source: v3election.proto
+
+import (
+ context "context"
+ "encoding/json"
+ fmt "fmt"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ mvccpb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ etcdserverpb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+var (
+ _ = fmt.Errorf
+ _ = math.Inf
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CampaignRequest struct {
+ // name is the election's identifier for the campaign.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // lease is the ID of the lease attached to leadership of the election. If the
+ // lease expires or is revoked before resigning leadership, then the
+ // leadership is transferred to the next campaigner, if any.
+ Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
+ // value is the initial proclaimed value set when the campaigner wins the
+ // election.
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *CampaignRequest) Reset() { *m = CampaignRequest{} }
+func (m *CampaignRequest) String() string { return proto.CompactTextString(m) }
+func (*CampaignRequest) ProtoMessage() {}
+func (*CampaignRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{0}
+}
+
+func (m *CampaignRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *CampaignRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+func (m *CampaignRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type CampaignResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // leader describes the resources used for holding leadership of the election.
+ Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CampaignResponse) Reset() { *m = CampaignResponse{} }
+func (m *CampaignResponse) String() string { return proto.CompactTextString(m) }
+func (*CampaignResponse) ProtoMessage() {}
+func (*CampaignResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{1}
+}
+
+func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *CampaignResponse) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+type LeaderKey struct {
+ // name is the election identifier that corresponds to the leadership key.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // key is an opaque key representing the ownership of the election. If the key
+ // is deleted, then leadership is lost.
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ // rev is the creation revision of the key. It can be used to test for ownership
+ // of an election during transactions by testing the key's creation revision
+ // matches rev.
+ Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"`
+ // lease is the lease ID of the election leader.
+ Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"`
+}
+
+func (m *LeaderKey) Reset() { *m = LeaderKey{} }
+func (m *LeaderKey) String() string { return proto.CompactTextString(m) }
+func (*LeaderKey) ProtoMessage() {}
+func (*LeaderKey) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{2}
+}
+
+func (m *LeaderKey) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LeaderKey) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *LeaderKey) GetRev() int64 {
+ if m != nil {
+ return m.Rev
+ }
+ return 0
+}
+
+func (m *LeaderKey) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+type LeaderRequest struct {
+ // name is the election identifier for the leadership information.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *LeaderRequest) Reset() { *m = LeaderRequest{} }
+func (m *LeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaderRequest) ProtoMessage() {}
+func (*LeaderRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{3}
+}
+
+func (m *LeaderRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+type LeaderResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // kv is the key-value pair representing the latest leader update.
+ Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaderResponse) Reset() { *m = LeaderResponse{} }
+func (m *LeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaderResponse) ProtoMessage() {}
+func (*LeaderResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{4}
+}
+
+func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaderResponse) GetKv() *mvccpb.KeyValue {
+ if m != nil {
+ return m.Kv
+ }
+ return nil
+}
+
+type ResignRequest struct {
+ // leader is the leadership to relinquish by resignation.
+ Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResignRequest) Reset() { *m = ResignRequest{} }
+func (m *ResignRequest) String() string { return proto.CompactTextString(m) }
+func (*ResignRequest) ProtoMessage() {}
+func (*ResignRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{5}
+}
+
+func (m *ResignRequest) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+type ResignResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResignResponse) Reset() { *m = ResignResponse{} }
+func (m *ResignResponse) String() string { return proto.CompactTextString(m) }
+func (*ResignResponse) ProtoMessage() {}
+func (*ResignResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{6}
+}
+
+func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type ProclaimRequest struct {
+ // leader is the leadership hold on the election.
+ Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"`
+ // value is an update meant to overwrite the leader's current value.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} }
+func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) }
+func (*ProclaimRequest) ProtoMessage() {}
+func (*ProclaimRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{7}
+}
+
+func (m *ProclaimRequest) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+func (m *ProclaimRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type ProclaimResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} }
+func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) }
+func (*ProclaimResponse) ProtoMessage() {}
+func (*ProclaimResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{8}
+}
+
+func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest")
+ proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse")
+ proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey")
+ proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest")
+ proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse")
+ proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest")
+ proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse")
+ proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest")
+ proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse")
+}
+
+func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) }
+
+var fileDescriptor_c9b1f26cc432a035 = []byte{
+ // 531 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
+ 0x10, 0xc6, 0x59, 0x27, 0x84, 0x32, 0xa4, 0xad, 0x65, 0x82, 0x08, 0x21, 0xb8, 0xd1, 0x72, 0xa9,
+ 0x72, 0xb0, 0x51, 0xc3, 0x29, 0x27, 0x04, 0x02, 0x55, 0x2a, 0x12, 0xe0, 0x03, 0x82, 0xe3, 0xda,
+ 0x1d, 0xb9, 0x91, 0x1d, 0xaf, 0xb1, 0x5d, 0x4b, 0xb9, 0xf2, 0x0a, 0x1c, 0xe0, 0x91, 0x38, 0x22,
+ 0xf1, 0x02, 0x28, 0xf0, 0x20, 0x68, 0x77, 0xed, 0xfa, 0x8f, 0x12, 0x84, 0x9a, 0xdb, 0x78, 0xe7,
+ 0xdb, 0xf9, 0xcd, 0x37, 0x3b, 0x09, 0xe8, 0xf9, 0x0c, 0x43, 0xf4, 0xb2, 0x05, 0x8f, 0xac, 0x38,
+ 0xe1, 0x19, 0x37, 0xfa, 0xd5, 0x49, 0xec, 0x8e, 0x06, 0x3e, 0xf7, 0xb9, 0x4c, 0xd8, 0x22, 0x52,
+ 0x9a, 0xd1, 0x11, 0x66, 0xde, 0xb9, 0xcd, 0xe2, 0x85, 0x2d, 0x82, 0x14, 0x93, 0x1c, 0x93, 0xd8,
+ 0xb5, 0x93, 0xd8, 0x2b, 0x04, 0xc3, 0x2b, 0xc1, 0x32, 0xf7, 0xbc, 0xd8, 0xb5, 0x83, 0xbc, 0xc8,
+ 0x8c, 0x7d, 0xce, 0xfd, 0x10, 0x65, 0x8e, 0x45, 0x11, 0xcf, 0x98, 0x20, 0xa5, 0x2a, 0x4b, 0xdf,
+ 0xc1, 0xe1, 0x0b, 0xb6, 0x8c, 0xd9, 0xc2, 0x8f, 0x1c, 0xfc, 0x74, 0x89, 0x69, 0x66, 0x18, 0xd0,
+ 0x8d, 0xd8, 0x12, 0x87, 0x64, 0x42, 0x8e, 0xfb, 0x8e, 0x8c, 0x8d, 0x01, 0xdc, 0x0c, 0x91, 0xa5,
+ 0x38, 0xd4, 0x26, 0xe4, 0xb8, 0xe3, 0xa8, 0x0f, 0x71, 0x9a, 0xb3, 0xf0, 0x12, 0x87, 0x1d, 0x29,
+ 0x55, 0x1f, 0x74, 0x05, 0x7a, 0x55, 0x32, 0x8d, 0x79, 0x94, 0xa2, 0xf1, 0x14, 0x7a, 0x17, 0xc8,
+ 0xce, 0x31, 0x91, 0x55, 0xef, 0x9c, 0x8c, 0xad, 0xba, 0x0f, 0xab, 0xd4, 0x9d, 0x4a, 0x8d, 0x53,
+ 0x68, 0x0d, 0x1b, 0x7a, 0xa1, 0xba, 0xa5, 0xc9, 0x5b, 0xf7, 0xad, 0xfa, 0xa8, 0xac, 0xd7, 0x32,
+ 0x77, 0x86, 0x2b, 0xa7, 0x90, 0xd1, 0x8f, 0x70, 0xfb, 0xea, 0x70, 0xa3, 0x0f, 0x1d, 0x3a, 0x01,
+ 0xae, 0x64, 0xb9, 0xbe, 0x23, 0x42, 0x71, 0x92, 0x60, 0x2e, 0x1d, 0x74, 0x1c, 0x11, 0x56, 0x5e,
+ 0xbb, 0x35, 0xaf, 0xf4, 0x31, 0xec, 0xab, 0xd2, 0xff, 0x18, 0x13, 0xbd, 0x80, 0x83, 0x52, 0xb4,
+ 0x93, 0xf1, 0x09, 0x68, 0x41, 0x5e, 0x98, 0xd6, 0x2d, 0xf5, 0xa2, 0xd6, 0x19, 0xae, 0xde, 0x8b,
+ 0x01, 0x3b, 0x5a, 0x90, 0xd3, 0x67, 0xb0, 0xef, 0x60, 0x5a, 0x7b, 0xb5, 0x6a, 0x56, 0xe4, 0xff,
+ 0x66, 0xf5, 0x0a, 0x0e, 0xca, 0x0a, 0xbb, 0xf4, 0x4a, 0x3f, 0xc0, 0xe1, 0xdb, 0x84, 0x7b, 0x21,
+ 0x5b, 0x2c, 0xaf, 0xdb, 0x4b, 0xb5, 0x48, 0x5a, 0x7d, 0x91, 0x4e, 0x41, 0xaf, 0x2a, 0xef, 0xd2,
+ 0xe3, 0xc9, 0xd7, 0x2e, 0xec, 0xbd, 0x2c, 0x1a, 0x30, 0x02, 0xd8, 0x2b, 0xf7, 0xd3, 0x78, 0xd4,
+ 0xec, 0xac, 0xf5, 0x53, 0x18, 0x99, 0xdb, 0xd2, 0x8a, 0x42, 0x27, 0x9f, 0x7f, 0xfe, 0xf9, 0xa2,
+ 0x8d, 0xe8, 0x3d, 0x3b, 0x9f, 0xd9, 0xa5, 0xd0, 0xf6, 0x0a, 0xd9, 0x9c, 0x4c, 0x05, 0xac, 0xf4,
+ 0xd0, 0x86, 0xb5, 0xa6, 0xd6, 0x86, 0xb5, 0xad, 0x6f, 0x81, 0xc5, 0x85, 0x4c, 0xc0, 0x3c, 0xe8,
+ 0xa9, 0xd9, 0x1a, 0x0f, 0x37, 0x4d, 0xbc, 0x04, 0x8d, 0x37, 0x27, 0x0b, 0x8c, 0x29, 0x31, 0x43,
+ 0x7a, 0xb7, 0x81, 0x51, 0x0f, 0x25, 0x20, 0x3e, 0xdc, 0x7a, 0xe3, 0xca, 0x81, 0xef, 0x42, 0x39,
+ 0x92, 0x94, 0x07, 0x74, 0xd0, 0xa0, 0x70, 0x55, 0x78, 0x4e, 0xa6, 0x4f, 0x88, 0x70, 0xa3, 0x16,
+ 0xb4, 0xcd, 0x69, 0x2c, 0x7e, 0x9b, 0xd3, 0xdc, 0xe9, 0x2d, 0x6e, 0x12, 0x29, 0x9a, 0x93, 0xe9,
+ 0x73, 0xfd, 0xfb, 0xda, 0x24, 0x3f, 0xd6, 0x26, 0xf9, 0xb5, 0x36, 0xc9, 0xb7, 0xdf, 0xe6, 0x0d,
+ 0xb7, 0x27, 0xff, 0x18, 0x67, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xe6, 0x7c, 0x66, 0xa9,
+ 0x05, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// ElectionClient is the client API for Election service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ElectionClient interface {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error)
+ // Proclaim updates the leader's posted value with a new value.
+ Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error)
+ // Leader returns the current election proclamation, if any.
+ Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error)
+ // Observe streams election proclamations in-order as made by the election's
+ // elected leaders.
+ Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error)
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error)
+}
+
+type electionClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewElectionClient(cc *grpc.ClientConn) ElectionClient {
+ return &electionClient{cc}
+}
+
+func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) {
+ out := new(CampaignResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) {
+ out := new(ProclaimResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) {
+ out := new(LeaderResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Election_serviceDesc.Streams[0], "/v3electionpb.Election/Observe", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &electionObserveClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Election_ObserveClient interface {
+ Recv() (*LeaderResponse, error)
+ grpc.ClientStream
+}
+
+type electionObserveClient struct {
+ grpc.ClientStream
+}
+
+func (x *electionObserveClient) Recv() (*LeaderResponse, error) {
+ m := new(LeaderResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) {
+ out := new(ResignResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ElectionServer is the server API for the Election service.
+type ElectionServer interface {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error)
+ // Proclaim updates the leader's posted value with a new value.
+ Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error)
+ // Leader returns the current election proclamation, if any.
+ Leader(context.Context, *LeaderRequest) (*LeaderResponse, error)
+ // Observe streams election proclamations in-order as made by the election's
+ // elected leaders.
+ Observe(*LeaderRequest, Election_ObserveServer) error
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ Resign(context.Context, *ResignRequest) (*ResignResponse, error)
+}
+
+// UnimplementedElectionServer can be embedded to have forward compatible implementations.
+type UnimplementedElectionServer struct{}
+
+func (*UnimplementedElectionServer) Campaign(ctx context.Context, req *CampaignRequest) (*CampaignResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Campaign not implemented")
+}
+
+func (*UnimplementedElectionServer) Proclaim(ctx context.Context, req *ProclaimRequest) (*ProclaimResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Proclaim not implemented")
+}
+
+func (*UnimplementedElectionServer) Leader(ctx context.Context, req *LeaderRequest) (*LeaderResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Leader not implemented")
+}
+
+func (*UnimplementedElectionServer) Observe(req *LeaderRequest, srv Election_ObserveServer) error {
+ return status.Errorf(codes.Unimplemented, "method Observe not implemented")
+}
+
+func (*UnimplementedElectionServer) Resign(ctx context.Context, req *ResignRequest) (*ResignResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Resign not implemented")
+}
+
+func RegisterElectionServer(s *grpc.Server, srv ElectionServer) {
+ s.RegisterService(&_Election_serviceDesc, srv)
+}
+
+func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CampaignRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Campaign(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Campaign",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ProclaimRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Proclaim(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Proclaim",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Leader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Leader",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Leader(ctx, req.(*LeaderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(LeaderRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(ElectionServer).Observe(m, &electionObserveServer{stream})
+}
+
+type Election_ObserveServer interface {
+ Send(*LeaderResponse) error
+ grpc.ServerStream
+}
+
+type electionObserveServer struct {
+ grpc.ServerStream
+}
+
+func (x *electionObserveServer) Send(m *LeaderResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ResignRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Resign(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Resign",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Election_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "v3electionpb.Election",
+ HandlerType: (*ElectionServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Campaign",
+ Handler: _Election_Campaign_Handler,
+ },
+ {
+ MethodName: "Proclaim",
+ Handler: _Election_Proclaim_Handler,
+ },
+ {
+ MethodName: "Leader",
+ Handler: _Election_Leader_Handler,
+ },
+ {
+ MethodName: "Resign",
+ Handler: _Election_Resign_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Observe",
+ Handler: _Election_Observe_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "v3election.proto",
+}
+
+func (m *CampaignRequest) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *CampaignResponse) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *LeaderKey) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *LeaderRequest) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *LeaderResponse) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *ResignRequest) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *ResignResponse) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *CampaignRequest) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *CampaignResponse) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *LeaderKey) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *LeaderRequest) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *LeaderResponse) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *ResignRequest) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *ResignResponse) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *ProclaimRequest) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *ProclaimResponse) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func sovV3Election(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+
+func (m *CampaignRequest) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *CampaignResponse) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *LeaderKey) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *LeaderRequest) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *LeaderResponse) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *ResignRequest) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *ResignResponse) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *ProclaimRequest) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *ProclaimResponse) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+var (
+ ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupV3Election = fmt.Errorf("proto: unexpected end of group")
+)
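
A minimal usage sketch of the generated ElectionClient defined above: campaign for leadership, proclaim a new value, then resign. The conn, ctx, and leaseID values are placeholders created elsewhere (a *grpc.ClientConn, a context, and a previously granted lease ID); error handling is abbreviated.

ec := v3electionpb.NewElectionClient(conn)

// Campaign blocks until this candidate holds the election named "my-election";
// leadership stays valid only while the attached lease is alive.
campaign, err := ec.Campaign(ctx, &v3electionpb.CampaignRequest{
	Name:  []byte("my-election"),
	Lease: leaseID,
	Value: []byte("v1"),
})
if err != nil {
	log.Fatal(err)
}
leader := campaign.Leader // LeaderKey: opaque key plus creation revision proving ownership

// Proclaim overwrites the posted value while keeping leadership.
if _, err := ec.Proclaim(ctx, &v3electionpb.ProclaimRequest{Leader: leader, Value: []byte("v2")}); err != nil {
	log.Fatal(err)
}

// Resign releases leadership so the next campaigner can take over.
if _, err := ec.Resign(ctx, &v3electionpb.ResignRequest{Leader: leader}); err != nil {
	log.Fatal(err)
}
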
diff --git a/server/etcdserver/api/v3election/v3electionpb/v3election.proto b/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
similarity index 100%
rename from server/etcdserver/api/v3election/v3electionpb/v3election.proto
rename to etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
diff --git a/server/etcdserver/api/v3lock/doc.go b/etcd/etcdserver/api/v3lock/doc.go
similarity index 100%
rename from server/etcdserver/api/v3lock/doc.go
rename to etcd/etcdserver/api/v3lock/doc.go
diff --git a/etcd/etcdserver/api/v3lock/lock.go b/etcd/etcdserver/api/v3lock/lock.go
new file mode 100644
index 00000000000..5f63b8962b2
--- /dev/null
+++ b/etcd/etcdserver/api/v3lock/lock.go
@@ -0,0 +1,57 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3lock
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb"
+)
+
+type lockServer struct {
+ c *clientv3.Client
+}
+
+func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
+ return &lockServer{c}
+}
+
+func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
+ s, err := concurrency.NewSession(
+ ls.c,
+ concurrency.WithLease(clientv3.LeaseID(req.Lease)),
+ concurrency.WithContext(ctx),
+ )
+ if err != nil {
+ return nil, err
+ }
+ s.Orphan()
+ m := concurrency.NewMutex(s, string(req.Name))
+ if err = m.Lock(ctx); err != nil {
+ return nil, err
+ }
+ return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
+}
+
+func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
+ resp, err := ls.c.Delete(ctx, string(req.Key))
+ if err != nil {
+ return nil, err
+ }
+ return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
+}
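
The Lock handler above builds a concurrency session from the caller's lease, orphans it so the lock's lifetime is governed by that lease rather than a server-side keepalive, and returns the mutex ownership key; Unlock simply deletes that key. From a client's point of view the flow looks roughly like the sketch below, where conn, ctx, and leaseID are placeholders set up elsewhere.

lc := v3lockpb.NewLockClient(conn)

// Acquire the named lock; it is held at most as long as leaseID stays alive.
lockResp, err := lc.Lock(ctx, &v3lockpb.LockRequest{
	Name:  []byte("my-lock"),
	Lease: leaseID,
})
if err != nil {
	log.Fatal(err)
}

// ... critical section guarded by ownership of lockResp.Key ...

// Unlock deletes the ownership key, waking the next waiting Lock caller.
if _, err := lc.Unlock(ctx, &v3lockpb.UnlockRequest{Key: lockResp.Key}); err != nil {
	log.Fatal(err)
}
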
diff --git a/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
similarity index 90%
rename from server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
rename to etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
index 27be5acc558..26594f21d4a 100644
--- a/server/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
+++ b/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
@@ -1,5 +1,5 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: server/etcdserver/api/v3lock/v3lockpb/v3lock.proto
+// Code generated by protoc-gen-grpc-gateway.
+// source: etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
/*
Package v3lockpb is a reverse proxy.
@@ -10,10 +10,11 @@ package gw
import (
"context"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
"io"
"net/http"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb"
+
"github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
@@ -21,18 +22,19 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// Suppress "imported and not used" errors
var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-var _ = descriptor.ForMessage
-var _ = metadata.Join
+
+var (
+ _ io.Reader
+ _ status.Status
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = descriptor.ForMessage
+)
func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq v3lockpb.LockRequest
@@ -48,7 +50,6 @@ func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, clien
msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
-
}
func local_request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -65,7 +66,6 @@ func local_request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler,
msg, err := server.Lock(ctx, &protoReq)
return msg, metadata, err
-
}
func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -82,7 +82,6 @@ func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, cli
msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
-
}
func local_request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -99,20 +98,15 @@ func local_request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshale
msg, err := server.Unlock(ctx, &protoReq)
return msg, metadata, err
-
}
// v3lockpb.RegisterLockHandlerServer registers the http handlers for service Lock to "mux".
// UnaryRPC :call v3lockpb.LockServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
-// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLockHandlerFromEndpoint instead.
func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3lockpb.LockServer) error {
-
mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
- var stream runtime.ServerTransportStream
- ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
@@ -120,7 +114,6 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
return
}
resp, md, err := local_request_Lock_Lock_0(rctx, inboundMarshaler, server, req, pathParams)
- md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@@ -128,14 +121,11 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
}
forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
- var stream runtime.ServerTransportStream
- ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
@@ -143,7 +133,6 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
return
}
resp, md, err := local_request_Lock_Unlock_0(rctx, inboundMarshaler, server, req, pathParams)
- md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
@@ -151,7 +140,6 @@ func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
}
forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
return nil
@@ -194,7 +182,6 @@ func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "LockClient" to call the correct interceptors.
func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
-
mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -212,7 +199,6 @@ func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien
}
forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -232,7 +218,6 @@ func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien
}
forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
})
return nil
diff --git a/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
new file mode 100644
index 00000000000..c6edbc60a31
--- /dev/null
+++ b/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
@@ -0,0 +1,365 @@
+// Code generated by protoc-gen-gogo.
+// source: v3lock.proto
+
+package v3lockpb
+
+import (
+ context "context"
+ "encoding/json"
+ fmt "fmt"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ etcdserverpb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+var (
+ _ = fmt.Errorf
+ _ = math.Inf
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type LockRequest struct {
+ // name is the identifier for the distributed shared lock to be acquired.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // lease is the ID of the lease that will be attached to ownership of the
+ // lock. If the lease expires or is revoked and currently holds the lock,
+ // the lock is automatically released. Calls to Lock with the same lease will
+ // be treated as a single acquisition; locking twice with the same lease is a
+ // no-op.
+ Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
+}
+
+func (m *LockRequest) Reset() { *m = LockRequest{} }
+func (m *LockRequest) String() string { return proto.CompactTextString(m) }
+func (*LockRequest) ProtoMessage() {}
+func (*LockRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{0}
+}
+
+func (m *LockRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LockRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+type LockResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // key is a key that will exist on etcd for the duration that the Lock caller
+ // owns the lock. Users should not modify this key or the lock may exhibit
+ // undefined behavior.
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *LockResponse) Reset() { *m = LockResponse{} }
+func (m *LockResponse) String() string { return proto.CompactTextString(m) }
+func (*LockResponse) ProtoMessage() {}
+func (*LockResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{1}
+}
+
+func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LockResponse) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+type UnlockRequest struct {
+ // key is the lock ownership key granted by Lock.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
+func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
+func (*UnlockRequest) ProtoMessage() {}
+func (*UnlockRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{2}
+}
+
+func (m *UnlockRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+type UnlockResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
+func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
+func (*UnlockResponse) ProtoMessage() {}
+func (*UnlockResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{3}
+}
+
+func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest")
+ proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse")
+ proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest")
+ proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
+}
+
+func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) }
+
+var fileDescriptor_52389b3e2f253201 = []byte{
+ // 330 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
+ 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
+ 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x3e, 0xb5, 0x24, 0x39,
+ 0x45, 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2,
+ 0x2f, 0x2a, 0x48, 0x86, 0x2a, 0x90, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x2b, 0x49, 0xcc,
+ 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, 0xc8, 0x2a, 0x99, 0x73, 0x71, 0xfb,
+ 0xe4, 0x27, 0x67, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25,
+ 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x39,
+ 0xa9, 0x89, 0xc5, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x10, 0x8e, 0x52, 0x18, 0x17,
+ 0x0f, 0x44, 0x63, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x09, 0x17, 0x5b, 0x46, 0x6a, 0x62,
+ 0x4a, 0x6a, 0x11, 0x58, 0x2f, 0xb7, 0x91, 0x8c, 0x1e, 0xb2, 0x7b, 0xf4, 0x60, 0xea, 0x3c, 0xc0,
+ 0x6a, 0x82, 0xa0, 0x6a, 0x85, 0x04, 0xb8, 0x98, 0xb3, 0x53, 0x2b, 0xc1, 0x26, 0xf3, 0x04, 0x81,
+ 0x98, 0x4a, 0x8a, 0x5c, 0xbc, 0xa1, 0x79, 0x39, 0x48, 0x4e, 0x82, 0x2a, 0x61, 0x44, 0x28, 0x71,
+ 0xe3, 0xe2, 0x83, 0x29, 0xa1, 0xc4, 0x72, 0xa3, 0x0d, 0x8c, 0x5c, 0x2c, 0x20, 0x3f, 0x08, 0xf9,
+ 0x43, 0x69, 0x51, 0x3d, 0x58, 0x60, 0xeb, 0x21, 0x05, 0x8a, 0x94, 0x18, 0xba, 0x30, 0xc4, 0x34,
+ 0x25, 0x89, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x09, 0x29, 0xf1, 0xea, 0x97, 0x19, 0xeb, 0x83, 0x14,
+ 0x80, 0x09, 0x2b, 0x46, 0x2d, 0xa1, 0x70, 0x2e, 0x36, 0x88, 0x0b, 0x85, 0xc4, 0x11, 0x7a, 0x51,
+ 0xbc, 0x25, 0x25, 0x81, 0x29, 0x01, 0x35, 0x56, 0x0a, 0x6c, 0xac, 0x88, 0x12, 0x3f, 0xdc, 0xd8,
+ 0xd2, 0x3c, 0xa8, 0xc1, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0,
+ 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, 0x78, 0x34, 0x06, 0x04, 0x00, 0x00,
+ 0xff, 0xff, 0x4a, 0x4d, 0xca, 0xbb, 0x36, 0x02, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// LockClient is the client API for Lock service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type LockClient interface {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+ // lease associated with the owner expires.
+ Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error)
+ // Unlock takes a key returned by Lock and releases the hold on lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error)
+}
+
+type lockClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewLockClient(cc *grpc.ClientConn) LockClient {
+ return &lockClient{cc}
+}
+
+func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
+ out := new(LockResponse)
+ err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
+ out := new(UnlockResponse)
+ err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// LockServer is the server API for the Lock service.
+type LockServer interface {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+ // lease associated with the owner expires.
+ Lock(context.Context, *LockRequest) (*LockResponse, error)
+ // Unlock takes a key returned by Lock and releases the hold on lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
+}
+
+// UnimplementedLockServer can be embedded to have forward compatible implementations.
+type UnimplementedLockServer struct{}
+
+func (*UnimplementedLockServer) Lock(ctx context.Context, req *LockRequest) (*LockResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Lock not implemented")
+}
+
+func (*UnimplementedLockServer) Unlock(ctx context.Context, req *UnlockRequest) (*UnlockResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Unlock not implemented")
+}
+
+func RegisterLockServer(s *grpc.Server, srv LockServer) {
+ s.RegisterService(&_Lock_serviceDesc, srv)
+}
+
+func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LockServer).Lock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3lockpb.Lock/Lock",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LockServer).Lock(ctx, req.(*LockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UnlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LockServer).Unlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3lockpb.Lock/Unlock",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Lock_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "v3lockpb.Lock",
+ HandlerType: (*LockServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Lock",
+ Handler: _Lock_Lock_Handler,
+ },
+ {
+ MethodName: "Unlock",
+ Handler: _Lock_Unlock_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "v3lock.proto",
+}
+
+func (m *LockRequest) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *LockResponse) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *LockRequest) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *LockResponse) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *UnlockRequest) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *UnlockResponse) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func sovV3Lock(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+
+func (m *LockRequest) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *LockResponse) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+var (
+ ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupV3Lock = fmt.Errorf("proto: unexpected end of group")
+)
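
Note that in this fork the generated Marshal/Unmarshal/Size methods delegate to encoding/json rather than the protobuf wire format, so messages round-trip as JSON (with []byte fields base64-encoded). A small illustrative sketch, with arbitrary field values:

req := &v3lockpb.LockRequest{Name: []byte("my-lock"), Lease: 42}

data, err := req.Marshal() // JSON bytes, not proto wire bytes
if err != nil {
	log.Fatal(err)
}

var decoded v3lockpb.LockRequest
if err := decoded.Unmarshal(data); err != nil {
	log.Fatal(err)
}

// Size likewise reports the JSON-encoded length.
fmt.Println(req.Size() == len(data)) // true
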
diff --git a/server/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
similarity index 100%
rename from server/etcdserver/api/v3lock/v3lockpb/v3lock.proto
rename to etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
diff --git a/etcd/etcdserver/api/v3rpc/auth.go b/etcd/etcdserver/api/v3rpc/auth.go
new file mode 100644
index 00000000000..17ce3262c21
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/auth.go
@@ -0,0 +1,166 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type AuthServer struct {
+ authenticator etcdserver.Authenticator
+}
+
+func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
+ return &AuthServer{authenticator: s}
+}
+
+func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+ resp, err := as.authenticator.AuthEnable(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+ resp, err := as.authenticator.AuthDisable(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) {
+ resp, err := as.authenticator.AuthStatus(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ resp, err := as.authenticator.Authenticate(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := as.authenticator.RoleAdd(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := as.authenticator.RoleDelete(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := as.authenticator.RoleGet(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := as.authenticator.RoleList(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := as.authenticator.RoleRevokePermission(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := as.authenticator.RoleGrantPermission(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := as.authenticator.UserAdd(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := as.authenticator.UserDelete(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := as.authenticator.UserGet(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := as.authenticator.UserList(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := as.authenticator.UserGrantRole(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := as.authenticator.UserRevokeRole(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := as.authenticator.UserChangePassword(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
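
Every handler in auth.go follows the same shape: delegate to the authenticator, and on failure translate the server-side error into a gRPC status. A minimal standalone sketch of that translation pattern; the error values and the mapping below are illustrative, not the actual togRPCError table:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Hypothetical domain errors standing in for etcdserver/auth errors.
var (
	errUserNotFound = errors.New("auth: user not found")
	errPermDenied   = errors.New("auth: permission denied")
)

// toStatusError maps known domain errors onto gRPC status errors,
// falling back to codes.Unknown for anything unrecognized.
func toStatusError(err error) error {
	switch {
	case errors.Is(err, errUserNotFound):
		return status.Error(codes.FailedPrecondition, err.Error())
	case errors.Is(err, errPermDenied):
		return status.Error(codes.PermissionDenied, err.Error())
	default:
		return status.Error(codes.Unknown, err.Error())
	}
}

func main() {
	fmt.Println(toStatusError(errPermDenied)) // rpc error: code = PermissionDenied ...
}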
diff --git a/server/etcdserver/api/v3rpc/codec.go b/etcd/etcdserver/api/v3rpc/codec.go
similarity index 93%
rename from server/etcdserver/api/v3rpc/codec.go
rename to etcd/etcdserver/api/v3rpc/codec.go
index d599ff63cc3..42cef4462c1 100644
--- a/server/etcdserver/api/v3rpc/codec.go
+++ b/etcd/etcdserver/api/v3rpc/codec.go
@@ -20,12 +20,10 @@ type codec struct{}
func (c *codec) Marshal(v interface{}) ([]byte, error) {
b, err := proto.Marshal(v.(proto.Message))
- sentBytes.Add(float64(len(b)))
return b, err
}
func (c *codec) Unmarshal(data []byte, v interface{}) error {
- receivedBytes.Add(float64(len(data)))
return proto.Unmarshal(data, v.(proto.Message))
}
diff --git a/server/etcdserver/api/v3rpc/header.go b/etcd/etcdserver/api/v3rpc/header.go
similarity index 80%
rename from server/etcdserver/api/v3rpc/header.go
rename to etcd/etcdserver/api/v3rpc/header.go
index a8f1f92cf99..4bb62f6850c 100644
--- a/server/etcdserver/api/v3rpc/header.go
+++ b/etcd/etcdserver/api/v3rpc/header.go
@@ -15,28 +15,27 @@
package v3rpc
import (
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)
type header struct {
clusterID int64
- memberID int64
- sg apply.RaftStatusGetter
+ memberID int64 // ID of the local member
+ sg etcdserver.RaftStatusGetter
rev func() int64
}
func newHeader(s *etcdserver.EtcdServer) header {
return header{
clusterID: int64(s.Cluster().ID()),
- memberID: int64(s.MemberId()),
+ memberID: int64(s.ID()),
sg: s,
rev: func() int64 { return s.KV().Rev() },
}
}
-// fill populates pb.ResponseHeader using etcdserver information
+// fill populates pb.ResponseHeader using information from the etcdserver
func (h *header) fill(rh *pb.ResponseHeader) {
if rh == nil {
panic("unexpected nil resp.Header")
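
The header type above carries the node's identity plus a closure that reads the live revision, and fill stamps them onto every response. A stdlib-only sketch of the same pattern, with a hypothetical respHeader standing in for pb.ResponseHeader:

package main

import "fmt"

// respHeader is a stand-in for pb.ResponseHeader.
type respHeader struct {
	ClusterID, MemberID uint64
	Revision            int64
	RaftTerm            uint64
}

// hdr carries static identity plus closures that read the live revision and term.
type hdr struct {
	clusterID, memberID uint64
	term                func() uint64
	rev                 func() int64
}

// fill stamps identity and the current revision/term onto a response header.
func (h hdr) fill(rh *respHeader) {
	rh.ClusterID = h.clusterID
	rh.MemberID = h.memberID
	rh.Revision = h.rev()
	rh.RaftTerm = h.term()
}

func main() {
	h := hdr{clusterID: 1, memberID: 2, term: func() uint64 { return 7 }, rev: func() int64 { return 42 }}
	var rh respHeader
	h.fill(&rh)
	fmt.Printf("%+v\n", rh) // {ClusterID:1 MemberID:2 Revision:42 RaftTerm:7}
}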
diff --git a/etcd/etcdserver/api/v3rpc/interceptor.go b/etcd/etcdserver/api/v3rpc/interceptor.go
new file mode 100644
index 00000000000..0fc4ebecae8
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/interceptor.go
@@ -0,0 +1,200 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
+const (
+ maxNoLeaderCnt = 3
+ warnUnaryRequestLatency = 300 * time.Millisecond
+ snapshotMethod = "/etcdserverpb.Maintenance/Snapshot"
+)
+
+type streamsMap struct {
+ mu sync.Mutex
+ streams map[grpc.ServerStream]struct{}
+}
+
+func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ if !api.IsCapabilityEnabled(api.V3rpcCapability) { // is the v3rpc capability enabled?
+ return nil, rpctypes.ErrGRPCNotCapable
+ }
+ // if this member is part of the cluster and is a learner, reject RPCs that learners do not support
+ if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) {
+ return nil, rpctypes.ErrGPRCNotSupportedForLearner
+ }
+
+ md, ok := metadata.FromIncomingContext(ctx)
+ if ok {
+ // data, _ := json.Marshal(md)
+ // s.Logger().Info("-", zap.String("metadata", string(data)))
+ // hasleader
+ if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+ if s.Leader() == types.ID(raft.None) {
+ return nil, rpctypes.ErrGRPCNoLeader
+ }
+ }
+ }
+
+ return handler(ctx, req)
+ }
+}
+
+func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
+ smap := monitorLeader(s)
+
+ return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+ return rpctypes.ErrGRPCNotCapable
+ }
+
+ if s.IsMemberExist(s.ID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learners do not support streaming RPCs except Snapshot
+ return rpctypes.ErrGPRCNotSupportedForLearner
+ }
+
+ md, ok := metadata.FromIncomingContext(ss.Context())
+ if ok {
+ ks := md[rpctypes.MetadataRequireLeaderKey] // hasleader
+ if len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+ if s.Leader() == types.ID(raft.None) {
+ return rpctypes.ErrGRPCNoLeader
+ }
+
+ ctx := newCancellableContext(ss.Context())
+ ss = serverStreamWithCtx{ctx: ctx, ServerStream: ss}
+
+ smap.mu.Lock()
+ smap.streams[ss] = struct{}{}
+ smap.mu.Unlock()
+
+ defer func() {
+ smap.mu.Lock()
+ delete(smap.streams, ss)
+ smap.mu.Unlock()
+ // TODO: investigate whether the reason for cancellation here is useful to know
+ ctx.Cancel(nil)
+ }()
+ }
+ }
+
+ return handler(srv, ss)
+ }
+}
+
+// cancellableContext wraps a context with new cancellable context that allows a
+// specific cancellation error to be preserved and later retrieved using the
+// Context.Err() function. This is so downstream context users can disambiguate
+// the reason for the cancellation which could be from the client (for example)
+// or from this interceptor code.
+type cancellableContext struct {
+ context.Context
+
+ lock sync.RWMutex
+ cancel context.CancelFunc
+ cancelReason error
+}
+
+func newCancellableContext(parent context.Context) *cancellableContext {
+ ctx, cancel := context.WithCancel(parent)
+ return &cancellableContext{
+ Context: ctx,
+ cancel: cancel,
+ }
+}
+
+// Cancel stores the cancellation reason and then delegates to context.WithCancel
+// against the parent context.
+func (c *cancellableContext) Cancel(reason error) {
+ c.lock.Lock()
+ c.cancelReason = reason
+ c.lock.Unlock()
+ c.cancel()
+}
+
+// Err will return the preserved cancel reason error if present, and will
+// otherwise return the underlying error from the parent context.
+func (c *cancellableContext) Err() error {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if c.cancelReason != nil {
+ return c.cancelReason
+ }
+ return c.Context.Err()
+}
+
+type serverStreamWithCtx struct {
+ grpc.ServerStream
+
+ // ctx is used so that we can preserve a reason for cancellation.
+ ctx *cancellableContext
+}
+
+func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
+
+func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
+ smap := &streamsMap{
+ streams: make(map[grpc.ServerStream]struct{}),
+ }
+
+ s.GoAttach(func() {
+ election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
+ noLeaderCnt := 0
+
+ for {
+ select {
+ case <-s.StoppingNotify():
+ return
+ case <-time.After(election):
+ if s.Leader() == types.ID(raft.None) {
+ noLeaderCnt++
+ } else {
+ noLeaderCnt = 0
+ }
+
+ // We are more conservative on canceling existing streams. Reconnecting streams
+ // cost much more than just rejecting new requests. So we wait until the member
+ // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
+ if noLeaderCnt >= maxNoLeaderCnt {
+ smap.mu.Lock()
+ for ss := range smap.streams {
+ if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
+ ssWithCtx.ctx.Cancel(rpctypes.ErrGRPCNoLeader)
+ <-ss.Context().Done()
+ }
+ }
+ smap.streams = make(map[grpc.ServerStream]struct{})
+ smap.mu.Unlock()
+ }
+ }
+ }
+ })
+
+ return smap
+}
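
cancellableContext exists so that when the interceptor tears down streams (for example after maxNoLeaderCnt election timeouts without a leader) downstream code sees the real reason rather than a bare context.Canceled. A stdlib-only sketch of the same idea:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

var errNoLeader = errors.New("no leader") // stand-in for rpctypes.ErrGRPCNoLeader

type reasonCtx struct {
	context.Context
	mu     sync.RWMutex
	cancel context.CancelFunc
	reason error
}

func withReason(parent context.Context) *reasonCtx {
	ctx, cancel := context.WithCancel(parent)
	return &reasonCtx{Context: ctx, cancel: cancel}
}

// Cancel records why the context was canceled before canceling it.
func (c *reasonCtx) Cancel(reason error) {
	c.mu.Lock()
	c.reason = reason
	c.mu.Unlock()
	c.cancel()
}

// Err surfaces the recorded reason, falling back to the parent error.
func (c *reasonCtx) Err() error {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if c.reason != nil {
		return c.reason
	}
	return c.Context.Err()
}

func main() {
	ctx := withReason(context.Background())
	ctx.Cancel(errNoLeader)
	<-ctx.Done()
	fmt.Println(ctx.Err()) // prints "no leader", not context.Canceled
}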
diff --git a/etcd/etcdserver/api/v3rpc/key.go b/etcd/etcdserver/api/v3rpc/key.go
new file mode 100644
index 00000000000..301d038129f
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/key.go
@@ -0,0 +1,287 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3rpc implements etcd v3 RPC system based on gRPC.
+package v3rpc
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/adt"
+)
+
+type kvServer struct {
+ hdr header
+ kv etcdserver.RaftKV
+ // maxTxnOps is the max operations per txn.
+ // e.g suppose maxTxnOps = 128.
+ // Txn.Success can have at most 128 operations,
+ // and Txn.Failure can have at most 128 operations.
+ maxTxnOps uint
+}
+
+func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+ return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps}
+}
+
+func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ if err := checkPutRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Put(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+// DeleteRange deletes the given range from the key-value store.
+// A delete request increments the revision of the key-value store and generates a delete event in the event history for every deleted key.
+func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ if err := checkDeleteRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.DeleteRange(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+// Txn processes multiple requests in a single transaction. A txn request increments the revision of the key-value store
+// and generates events with the same revision for every completed request. It is not allowed to modify the same key several times within one txn.
+func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil {
+ return nil, err
+ }
+ // check for forbidden put/del overlaps after checking request to avoid quadratic blowup
+ if _, _, err := checkIntervals(r.Success); err != nil {
+ return nil, err
+ }
+ if _, _, err := checkIntervals(r.Failure); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Txn(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+// Compact compacts the event history in the etcd key-value store.
+// The key-value store should be periodically compacted, or the event history will grow without bound and consume a large amount of disk space.
+func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+ resp, err := s.kv.Compact(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func checkPutRequest(r *pb.PutRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ if r.IgnoreValue && len(r.Value) != 0 {
+ return rpctypes.ErrGRPCValueProvided
+ }
+ if r.IgnoreLease && r.Lease != 0 {
+ return rpctypes.ErrGRPCLeaseProvided
+ }
+ return nil
+}
+
+func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ return nil
+}
+
+func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error {
+ opc := len(r.Compare)
+ if opc < len(r.Success) {
+ opc = len(r.Success)
+ }
+ if opc < len(r.Failure) {
+ opc = len(r.Failure)
+ }
+ if opc > maxTxnOps {
+ return rpctypes.ErrGRPCTooManyOps
+ }
+
+ for _, c := range r.Compare {
+ if len(c.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ }
+ for _, u := range r.Success {
+ if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+ return err
+ }
+ }
+ for _, u := range r.Failure {
+ if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// checkIntervals tests whether puts and deletes overlap for a list of ops. If
+// there is an overlap, returns an error. If no overlap, return put and delete
+// sets for recursive evaluation.
+func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) {
+ dels := adt.NewIntervalTree()
+
+ // collect deletes from this level; build first to check lower level overlapped puts
+ for _, req := range reqs {
+ ok := req.RequestOp_RequestDeleteRange != nil
+ tv := req.RequestOp_RequestDeleteRange
+ if !ok {
+ continue
+ }
+ dreq := tv.RequestDeleteRange
+ if dreq == nil {
+ continue
+ }
+ var iv adt.Interval
+ if len(dreq.RangeEnd) != 0 {
+ iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd))
+ } else {
+ iv = adt.NewStringAffinePoint(string(dreq.Key))
+ }
+ dels.Insert(iv, struct{}{})
+ }
+
+ // collect children puts/deletes
+ puts := make(map[string]struct{})
+ for _, req := range reqs {
+ ok := req.RequestOp_RequestTxn != nil
+ tv := req.RequestOp_RequestTxn
+ if !ok {
+ continue
+ }
+ putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success)
+ if err != nil {
+ return nil, dels, err
+ }
+ putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure)
+ if err != nil {
+ return nil, dels, err
+ }
+ for k := range putsThen {
+ if _, ok := puts[k]; ok {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ for k := range putsElse {
+ if _, ok := puts[k]; ok {
+ // if key is from putsThen, overlap is OK since
+ // either then/else are mutually exclusive
+ if _, isSafe := putsThen[k]; !isSafe {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ dels.Union(delsThen, adt.NewStringAffineInterval("\x00", ""))
+ dels.Union(delsElse, adt.NewStringAffineInterval("\x00", ""))
+ }
+
+ // collect and check this level's puts
+ for _, req := range reqs {
+ ok := req.RequestOp_RequestPut != nil
+ tv := req.RequestOp_RequestPut
+ if !ok || tv.RequestPut == nil {
+ continue
+ }
+ k := string(tv.RequestPut.Key)
+ if _, ok := puts[k]; ok {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ return puts, dels, nil
+}
+
+func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error {
+ if u.RequestOp_RequestRange != nil {
+ return checkRangeRequest(u.RequestOp_RequestRange.RequestRange)
+ }
+
+ if u.RequestOp_RequestPut != nil {
+ return checkPutRequest(u.RequestOp_RequestPut.RequestPut)
+ }
+ if u.RequestOp_RequestDeleteRange != nil {
+ return checkDeleteRequest(u.RequestOp_RequestDeleteRange.RequestDeleteRange)
+ }
+ if u.RequestOp_RequestTxn != nil {
+ return checkTxnRequest(u.RequestOp_RequestTxn.RequestTxn, maxTxnOps)
+ }
+
+ // empty op / nil entry
+ return rpctypes.ErrGRPCKeyNotFound
+}
+
+// -------------------------------------------- OVER ----------------------------------------------------
+
+var _ = NewQuotaKVServer
+
+func checkRangeRequest(r *pb.RangeRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ return nil
+}
+
+// Range handles range reads, e.g. etcdctl get <key>.
+func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ if err := checkRangeRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Range(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
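
checkIntervals rejects a Txn in which any put overlaps another put or a delete range anywhere in the (possibly nested) transaction. A rough, stdlib-only sketch of that rule for a single nesting level, using plain string comparison in place of the adt interval tree:

package main

import (
	"errors"
	"fmt"
)

var errDuplicateKey = errors.New("duplicate key given in txn request")

type delRange struct{ key, rangeEnd string } // rangeEnd == "" means a single key

// inRange reports whether k is covered by the delete range.
func (d delRange) inRange(k string) bool {
	if d.rangeEnd == "" {
		return k == d.key
	}
	return k >= d.key && k < d.rangeEnd
}

// checkOps applies the same rule as checkIntervals at one nesting level:
// no put may target a key that another put writes or that a delete range covers.
func checkOps(puts []string, dels []delRange) error {
	seen := make(map[string]struct{})
	for _, k := range puts {
		if _, dup := seen[k]; dup {
			return errDuplicateKey
		}
		for _, d := range dels {
			if d.inRange(k) {
				return errDuplicateKey
			}
		}
		seen[k] = struct{}{}
	}
	return nil
}

func main() {
	err := checkOps([]string{"a", "b"}, []delRange{{key: "a", rangeEnd: "c"}})
	fmt.Println(err) // duplicate key given in txn request
}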
diff --git a/etcd/etcdserver/api/v3rpc/maintenance.go b/etcd/etcdserver/api/v3rpc/maintenance.go
new file mode 100644
index 00000000000..2a90a6636d6
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/maintenance.go
@@ -0,0 +1,306 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "crypto/sha256"
+ "io"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+)
+
+type KVGetter interface {
+ KV() mvcc.WatchableKV
+}
+
+type BackendGetter interface {
+ Backend() backend.Backend
+}
+
+type Alarmer interface {
+ Alarms() []*pb.AlarmMember
+ Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
+}
+
+type Downgrader interface {
+ Downgrade(ctx context.Context, dr *pb.DowngradeRequest) (*pb.DowngradeResponse, error)
+}
+
+type LeaderTransferrer interface {
+ MoveLeader(ctx context.Context, lead, target uint64) error
+}
+
+type AuthGetter interface {
+ AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
+ AuthStore() auth.AuthStore
+}
+
+type ClusterStatusGetter interface {
+ IsLearner() bool
+}
+
+type maintenanceServer struct {
+ lg *zap.Logger
+ rg etcdserver.RaftStatusGetter // exposes raft status
+ kg KVGetter
+ bg BackendGetter
+ a Alarmer
+ lt LeaderTransferrer
+ hdr header
+ cs ClusterStatusGetter
+ d Downgrader
+}
+
+func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
+ srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s), cs: s, d: s}
+ if srv.lg == nil {
+ srv.lg = zap.NewNop()
+ }
+ return &authMaintenanceServer{srv, s}
+}
+
+// Defragment defragments the backend storage.
+func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+ ms.lg.Info("starting defragment")
+ err := ms.bg.Backend().Defrag()
+ if err != nil {
+ ms.lg.Warn("failed to defragment", zap.Error(err))
+ return nil, err
+ }
+ ms.lg.Info("finished defragment")
+ return &pb.DefragmentResponse{}, nil
+}
+
+// snapshotSendBufferSize is large enough to hold more than one OS page in the buffer.
+const snapshotSendBufferSize = 32 * 1024
+
+// MoveLeader OK
+func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ if ms.rg.ID() != ms.rg.Leader() {
+ return nil, rpctypes.ErrGRPCNotLeader
+ }
+
+ if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MoveLeaderResponse{}, nil
+}
+
+func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ resp, err := ms.d.Downgrade(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ resp.Header = &pb.ResponseHeader{}
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+type authMaintenanceServer struct {
+ *maintenanceServer
+ ag AuthGetter
+}
+
+func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
+ authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+
+ return ams.ag.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+ if err := ams.isAuthenticated(ctx); err != nil {
+ return nil, err
+ }
+
+ return ams.maintenanceServer.Defragment(ctx, sr)
+}
+
+func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+ if err := ams.isAuthenticated(ctx); err != nil {
+ return nil, err
+ }
+
+ return ams.maintenanceServer.Hash(ctx, r)
+}
+
+func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+ if err := ams.isAuthenticated(ctx); err != nil {
+ return nil, err
+ }
+ return ams.maintenanceServer.HashKV(ctx, r)
+}
+
+func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+ return ams.maintenanceServer.Status(ctx, ar)
+}
+
+func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ return ams.maintenanceServer.MoveLeader(ctx, tr)
+}
+
+func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ return ams.maintenanceServer.Downgrade(ctx, r)
+}
+
+// ------------------------------------ OVER ---------------------------------------------------------------
+
+// Alarm ok
+func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp, err := ms.a.Alarm(ctx, ar)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ if resp.Header == nil {
+ resp.Header = &pb.ResponseHeader{}
+ }
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+// Status ok
+func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+ hdr := &pb.ResponseHeader{}
+ ms.hdr.fill(hdr)
+ resp := &pb.StatusResponse{
+ Header: hdr,
+ Version: version.Version,
+ Leader: uint64(ms.rg.Leader()),
+ RaftIndex: ms.rg.CommittedIndex(),
+ RaftAppliedIndex: ms.rg.AppliedIndex(),
+ RaftTerm: ms.rg.Term(),
+ DbSize: ms.bg.Backend().Size(),
+ DbSizeInUse: ms.bg.Backend().SizeInUse(),
+ IsLearner: ms.cs.IsLearner(),
+ }
+ if resp.Leader == raft.None {
+ resp.Errors = append(resp.Errors, etcdserver.ErrNoLeader.Error())
+ }
+ for _, a := range ms.a.Alarms() {
+ resp.Errors = append(resp.Errors, a.String())
+ }
+ return resp, nil
+}
+
+// Snapshot verifies admin permission and then streams a snapshot of the backend.
+func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+ if err := ams.isAuthenticated(srv.Context()); err != nil {
+ return err
+ }
+
+ return ams.maintenanceServer.Snapshot(sr, srv)
+}
+
+// Snapshot streams a snapshot of the backend store to the client.
+func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+ snap := ms.bg.Backend().Snapshot() // backend snapshot handle; it also initializes the send timeout
+ pr, pw := io.Pipe()
+ defer pr.Close()
+
+ go func() {
+ snap.WriteTo(pw)
+ if err := snap.Close(); err != nil {
+ ms.lg.Warn("failed to close snapshot", zap.Error(err))
+ }
+ pw.Close()
+ }()
+
+ // record the SHA digest of the snapshot data; it is used for integrity checking during snapshot restore
+ h := sha256.New()
+
+ sent := int64(0)
+ total := snap.Size()
+ size := humanize.Bytes(uint64(total))
+
+ start := time.Now()
+ ms.lg.Info("sending database snapshot to client", zap.Int64("total-bytes", total), zap.String("size", size))
+ for total-sent > 0 {
+ // The buffer only holds the bytes read from the stream; the response size is a multiple of the OS page size, as fetched from boltdb (e.g. 4 * 1024).
+ // Send does not wait for the message to be received by the client, so the buffer cannot safely be reused between Send calls.
+
+ buf := make([]byte, snapshotSendBufferSize)
+
+ n, err := io.ReadFull(pr, buf)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return togRPCError(err)
+ }
+ sent += int64(n)
+
+ // If total is x * snapshotSendBufferSize, a response with RemainingBytes == 0 is possible.
+ // Does that make etcd send a nil response to the client, so the client stops receiving from the snapshot stream before etcd sends the snapshot SHA?
+ // No. The client still receives non-nil responses until etcd closes the stream with EOF.
+ resp := &pb.SnapshotResponse{
+ RemainingBytes: uint64(total - sent),
+ Blob: buf[:n],
+ }
+ if err = srv.Send(resp); err != nil {
+ return togRPCError(err)
+ }
+ h.Write(buf[:n])
+ }
+
+ sha := h.Sum(nil)
+
+ ms.lg.Info("sending snapshot checksum to client", zap.Int64("total-bytes", total), zap.Int("checksum-size", len(sha)))
+ hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
+ if err := srv.Send(hresp); err != nil {
+ return togRPCError(err)
+ }
+
+ ms.lg.Info("successfully sent snapshot to client", zap.Int64("total-bytes", total),
+ zap.String("size", size),
+ zap.String("took", humanize.Time(start)),
+ )
+ return nil
+}
+
+// Hash ok
+func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+ h, rev, err := ms.kg.KV().Hash()
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+// HashKV OK
+func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+ h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev}
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
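
Snapshot streams the backend in fixed-size chunks and then sends one final message whose Blob is the SHA-256 of everything sent, so the client can verify integrity; RemainingBytes == 0 marks that trailer. A stdlib-only sketch of the chunk-then-checksum framing over an arbitrary reader; the frame struct is illustrative:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

// frame is a stand-in for pb.SnapshotResponse.
type frame struct {
	RemainingBytes uint64
	Blob           []byte
}

// streamWithChecksum sends r in bufSize chunks, then a final frame holding the SHA-256.
func streamWithChecksum(r io.Reader, total int64, bufSize int, send func(frame) error) error {
	h := sha256.New()
	sent := int64(0)
	for total-sent > 0 {
		buf := make([]byte, bufSize)
		n, err := io.ReadFull(r, buf)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return err
		}
		sent += int64(n)
		if err := send(frame{RemainingBytes: uint64(total - sent), Blob: buf[:n]}); err != nil {
			return err
		}
		h.Write(buf[:n])
	}
	// trailer: RemainingBytes == 0 and Blob == checksum of all data frames
	return send(frame{RemainingBytes: 0, Blob: h.Sum(nil)})
}

func main() {
	data := bytes.Repeat([]byte("x"), 10000)
	_ = streamWithChecksum(bytes.NewReader(data), int64(len(data)), 4096, func(f frame) error {
		fmt.Printf("frame: %d bytes, %d remaining\n", len(f.Blob), f.RemainingBytes)
		return nil
	})
}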
diff --git a/etcd/etcdserver/api/v3rpc/over_grpc.go b/etcd/etcdserver/api/v3rpc/over_grpc.go
new file mode 100644
index 00000000000..e2e69b22ec8
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/over_grpc.go
@@ -0,0 +1,87 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "crypto/tls"
+ "math"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/credentials"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health"
+ healthpb "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+const (
+ grpcOverheadBytes = 512 * 1024
+ maxStreams = math.MaxUint32
+ maxSendBytes = math.MaxInt32
+)
+
+func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnaryServerInterceptor, gopts ...grpc.ServerOption) *grpc.Server {
+ var opts []grpc.ServerOption
+ opts = append(opts, grpc.CustomCodec(&codec{}))
+ if tls != nil {
+ bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls})
+ opts = append(opts, grpc.Creds(bundle.TransportCredentials()))
+ }
+ // unary (single-shot) RPCs
+ chainUnaryInterceptors := []grpc.UnaryServerInterceptor{
+ newUnaryInterceptor(s), // metadata validation
+ grpc_prometheus.UnaryServerInterceptor,
+ }
+ if interceptor != nil {
+ chainUnaryInterceptors = append(chainUnaryInterceptors, interceptor)
+ }
+ // streaming RPCs
+ chainStreamInterceptors := []grpc.StreamServerInterceptor{
+ newStreamInterceptor(s),
+ grpc_prometheus.StreamServerInterceptor,
+ }
+
+ if s.Cfg.ExperimentalEnableDistributedTracing { // false by default
+ chainUnaryInterceptors = append(chainUnaryInterceptors, otelgrpc.UnaryServerInterceptor(s.Cfg.ExperimentalTracerOptions...))
+ chainStreamInterceptors = append(chainStreamInterceptors, otelgrpc.StreamServerInterceptor(s.Cfg.ExperimentalTracerOptions...))
+ }
+
+ opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(chainUnaryInterceptors...)))
+ opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(chainStreamInterceptors...)))
+
+ opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
+ opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
+ opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
+
+ grpcServer := grpc.NewServer(append(opts, gopts...)...)
+
+ pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) // KV store
+ pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) // watch
+ pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) // lease
+ pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) // cluster membership
+ pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) // auth
+ pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) // maintenance
+
+ hsrv := health.NewServer()
+ hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) // set the initial serving status
+ healthpb.RegisterHealthServer(grpcServer, hsrv)
+ grpc_prometheus.Register(grpcServer)
+
+ return grpcServer
+}
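
Server composes its options in a fixed order: codec, credentials, chained unary and stream interceptors, then message-size and stream limits, and only afterwards registers the individual services. A hedged sketch of the same composition with a toy timing interceptor; the limits below are illustrative, not etcd's configured values:

package main

import (
	"context"
	"log"
	"math"
	"time"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
)

// timing is a toy unary interceptor that logs how long each handler took.
func timing(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v", info.FullMethod, time.Since(start))
	return resp, err
}

func main() {
	const maxRequestBytes = 2 * 1024 * 1024 // illustrative request limit
	const overheadBytes = 512 * 1024        // headroom for gRPC framing, as above

	opts := []grpc.ServerOption{
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(timing /* , more interceptors */)),
		grpc.MaxRecvMsgSize(maxRequestBytes + overheadBytes),
		grpc.MaxSendMsgSize(math.MaxInt32),
		grpc.MaxConcurrentStreams(math.MaxUint32),
	}
	srv := grpc.NewServer(opts...)
	defer srv.Stop()
	// pb.RegisterKVServer(srv, ...) and the other service registrations would follow here.
}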
diff --git a/etcd/etcdserver/api/v3rpc/over_lease.go b/etcd/etcdserver/api/v3rpc/over_lease.go
new file mode 100644
index 00000000000..d5666b711c2
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/over_lease.go
@@ -0,0 +1,173 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "io"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+)
+
+type LeaseServer struct {
+ lg *zap.Logger
+ hdr header
+ le etcdserver.Lessor
+}
+
+func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+ srv := &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)}
+ if srv.lg == nil {
+ srv.lg = zap.NewNop()
+ }
+ return srv
+}
+
+func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ resp, err := ls.le.LeaseTimeToLive(ctx, rr)
+ if err != nil && err != lease.ErrLeaseNotFound {
+ return nil, togRPCError(err)
+ }
+ if err == lease.ErrLeaseNotFound {
+ resp = &pb.LeaseTimeToLiveResponse{
+ Header: &pb.ResponseHeader{},
+ ID: rr.ID,
+ TTL: -1,
+ }
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+// LeaseLeases lists all leases known to the current node.
+func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ resp, err := ls.le.LeaseLeases(ctx, rr)
+ if err != nil && err != lease.ErrLeaseNotFound {
+ return nil, togRPCError(err)
+ }
+ if err == lease.ErrLeaseNotFound {
+ resp = &pb.LeaseLeasesResponse{
+ Header: &pb.ResponseHeader{},
+ Leases: []*pb.LeaseStatus{},
+ }
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+// LeaseKeepAlive OK
+func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
+ errc := make(chan error, 1)
+ go func() {
+ errc <- ls.leaseKeepAlive(stream)
+ }()
+ select {
+ case err = <-errc:
+ case <-stream.Context().Done():
+ err = stream.Context().Err()
+ if err == context.Canceled {
+ err = rpctypes.ErrGRPCNoLeader
+ }
+ }
+ return err
+}
+
+func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
+ for {
+ req, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ if isClientCtxErr(stream.Context().Err(), err) {
+ ls.lg.Debug("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
+ } else {
+ ls.lg.Warn("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
+ }
+ return err
+ }
+
+ // Create the header before sending out the renew request. This ensures the revision is strictly smaller than or equal to
+ // the one at which the keepalive happened on the local etcd (when it is the leader) or on the remote leader.
+ // Without this, a lease might be revoked at rev 3 while the client sees the keepalive succeed at rev 4.
+ resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
+ ls.hdr.fill(resp.Header)
+
+ ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
+ if err == lease.ErrLeaseNotFound {
+ err = nil
+ ttl = 0
+ }
+
+ if err != nil {
+ return togRPCError(err)
+ }
+
+ resp.TTL = ttl
+ err = stream.Send(resp)
+ if err != nil {
+ if isClientCtxErr(stream.Context().Err(), err) {
+ ls.lg.Debug("failed to send lease keepalive response to gRPC stream", zap.Error(err))
+ } else {
+ ls.lg.Warn("failed to send lease keepalive response to gRPC stream", zap.Error(err))
+ }
+ return err
+ }
+ }
+}
+
+type quotaLeaseServer struct {
+ pb.LeaseServer
+ qa quotaAlarmer
+}
+
+// LeaseGrant creates a lease after checking the storage quota.
+func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ if err := s.qa.check(ctx, cr); err != nil { // check that storage space remains; otherwise raise an alarm
+ return nil, err
+ }
+ return s.LeaseServer.LeaseGrant(ctx, cr)
+}
+
+// LeaseGrant creates a lease.
+func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ resp, err := ls.le.LeaseGrant(ctx, cr)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+ return &quotaLeaseServer{
+ NewLeaseServer(s),
+ quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()},
+ }
+}
+
+// LeaseRevoke OK
+func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ resp, err := ls.le.LeaseRevoke(ctx, rr)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
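
LeaseKeepAlive runs the blocking receive/send loop in its own goroutine and races the loop's error channel against the stream context, mapping a plain cancellation onto a domain error. A stdlib-only sketch of that select pattern:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errNoLeader = errors.New("no leader") // stand-in for rpctypes.ErrGRPCNoLeader

// runKeepAlive races a worker's result against ctx cancellation,
// translating a bare cancellation into a domain error.
func runKeepAlive(ctx context.Context, work func() error) error {
	errc := make(chan error, 1)
	go func() { errc <- work() }() // note: the worker goroutine may outlive this call
	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		if errors.Is(ctx.Err(), context.Canceled) {
			return errNoLeader
		}
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { time.Sleep(10 * time.Millisecond); cancel() }()
	err := runKeepAlive(ctx, func() error { select {} }) // worker never returns
	fmt.Println(err)                                     // no leader
}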
diff --git a/etcd/etcdserver/api/v3rpc/over_member.go b/etcd/etcdserver/api/v3rpc/over_member.go
new file mode 100644
index 00000000000..71e8509bf0c
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/over_member.go
@@ -0,0 +1,143 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type ClusterServerInterFace interface {
+ MemberList(ctx context.Context, response *pb.MemberListRequest) (*pb.MemberListResponse, error)
+ MemberAdd(ctx context.Context, response *pb.MemberAddRequest) (*pb.MemberAddResponse, error)
+ MemberRemove(ctx context.Context, response *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error)
+ MemberUpdate(ctx context.Context, response *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error)
+ MemberPromote(ctx context.Context, response *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error)
+}
+
+var _ ClusterServerInterFace = &ClusterServer{}
+
+type ClusterServer struct {
+ cluster api.Cluster
+ server *etcdserver.EtcdServer
+}
+
+func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
+ return &ClusterServer{
+ cluster: s.Cluster(),
+ server: s,
+ }
+}
+
+// MemberList OK
+func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
+ if r.Linearizable {
+ if err := cs.server.LinearizableReadNotify(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+ }
+ membs := membersToProtoMembers(cs.cluster.Members())
+ return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
+}
+
+// MemberAdd ok
+func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
+ urls, err := types.NewURLs(r.PeerURLs)
+ if err != nil {
+ return nil, rpctypes.ErrGRPCMemberBadURLs
+ }
+
+ now := time.Now()
+ var m *membership.Member
+ if r.IsLearner {
+ m = membership.NewMemberAsLearner("", urls, "", &now)
+ } else {
+ m = membership.NewMember("", urls, "", &now)
+ }
+ membs, merr := cs.server.AddMember(ctx, *m)
+ if merr != nil {
+ return nil, togRPCError(merr)
+ }
+
+ return &pb.MemberAddResponse{
+ Header: cs.header(),
+ Member: &pb.Member{
+ ID: uint64(m.ID),
+ PeerURLs: m.PeerURLs,
+ IsLearner: m.IsLearner,
+ },
+ Members: membersToProtoMembers(membs),
+ }, nil
+}
+
+func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
+ membs, err := cs.server.RemoveMember(ctx, r.ID)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
+ m := membership.Member{
+ ID: types.ID(r.ID),
+ RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
+ }
+ membs, err := cs.server.UpdateMember(ctx, m)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) {
+ membs, err := cs.server.PromoteMember(ctx, r.ID)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberPromoteResponse{
+ Header: cs.header(),
+ Members: membersToProtoMembers(membs),
+ }, nil
+}
+
+func (cs *ClusterServer) header() *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(cs.cluster.ID()),
+ MemberId: uint64(cs.server.ID()),
+ RaftTerm: cs.server.Term(),
+ }
+}
+
+func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
+ protoMembs := make([]*pb.Member, len(membs))
+ for i := range membs {
+ protoMembs[i] = &pb.Member{
+ Name: membs[i].Name,
+ ID: uint64(membs[i].ID),
+ PeerURLs: membs[i].PeerURLs,
+ ClientURLs: membs[i].ClientURLs,
+ IsLearner: membs[i].IsLearner,
+ }
+ }
+ return protoMembs
+}
diff --git a/etcd/etcdserver/api/v3rpc/over_quota.go b/etcd/etcdserver/api/v3rpc/over_quota.go
new file mode 100644
index 00000000000..e5d07f3cfcf
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/over_quota.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+// Quota enforcement for write requests.
+
+type quotaKVServer struct {
+ pb.KVServer
+ qa quotaAlarmer
+}
+
+type quotaAlarmer struct {
+ q etcdserver.Quota // quota computation
+ a Alarmer
+ id types.ID
+}
+
+// check whether the request satisfies the quota. If there is not enough space, the request is rejected and a free-space (NOSPACE) alarm is raised.
+func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
+ if qa.q.Available(r) { // check available storage space
+ return nil
+ }
+ // out of storage space; activate the NOSPACE alarm
+ req := &pb.AlarmRequest{
+ MemberID: uint64(qa.id),
+ Action: pb.AlarmRequest_ACTIVATE, // check
+ Alarm: pb.AlarmType_NOSPACE,
+ }
+ qa.a.Alarm(ctx, req)
+ return rpctypes.ErrGRPCNoSpace
+}
+
+func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+ return &quotaKVServer{
+ NewKVServer(s),
+ quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()},
+ }
+}
+
+func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ if err := s.qa.check(ctx, r); err != nil {
+ return nil, err
+ }
+ return s.KVServer.Put(ctx, r)
+}
+
+func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if err := s.qa.check(ctx, r); err != nil {
+ return nil, err
+ }
+ return s.KVServer.Txn(ctx, r)
+}
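
quotaKVServer and quotaLeaseServer are thin decorators: they embed the real server and run the quota check before delegating writes. A stdlib-only sketch of the same embed-and-precheck pattern with hypothetical types:

package main

import (
	"errors"
	"fmt"
)

var errNoSpace = errors.New("database space exceeded") // stand-in for ErrGRPCNoSpace

type kv interface{ Put(key, val string) error }

type store struct{ data map[string]string }

func (s *store) Put(key, val string) error { s.data[key] = val; return nil }

// quotaKV wraps a kv and rejects writes once the backend is over quota.
type quotaKV struct {
	kv
	used, quota int
}

func (q *quotaKV) Put(key, val string) error {
	cost := len(key) + len(val)
	if q.used+cost > q.quota {
		return errNoSpace // the real server would also raise a NOSPACE alarm here
	}
	q.used += cost
	return q.kv.Put(key, val)
}

func main() {
	s := &quotaKV{kv: &store{data: map[string]string{}}, quota: 8}
	fmt.Println(s.Put("a", "1"))       // <nil>
	fmt.Println(s.Put("big", "value")) // database space exceeded
}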
diff --git a/etcd/etcdserver/api/v3rpc/over_watch.go b/etcd/etcdserver/api/v3rpc/over_watch.go
new file mode 100644
index 00000000000..d7a5e636481
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/over_watch.go
@@ -0,0 +1,522 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "io"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+)
+
+const minWatchProgressInterval = 100 * time.Millisecond
+
+type watchServer struct {
+ lg *zap.Logger
+ clusterID int64
+ memberID int64
+ maxRequestBytes int
+ sg etcdserver.RaftStatusGetter
+ watchable mvcc.WatchableKV
+ ag AuthGetter
+}
+
+var (
+ progressReportInterval = 10 * time.Minute
+ progressReportIntervalMu sync.RWMutex
+)
+
+// SetProgressReportInterval updates the progress report interval.
+func SetProgressReportInterval(newTimeout time.Duration) {
+ progressReportIntervalMu.Lock()
+ progressReportInterval = newTimeout
+ progressReportIntervalMu.Unlock()
+}
+
+// We send ctrl response inside the read loop. We do not want
+// send to block read, but we still want ctrl response we sent to
+// be serialized. Thus we use a buffered chan to solve the problem.
+// A small buffer should be OK for most cases, since we expect the
+// ctrl requests are infrequent.
+const ctrlStreamBufLen = 16
+
+// serverWatchStream is an etcd server side stream. It receives requests
+// from the client side gRPC stream. It receives watch events from mvcc.WatchStream,
+// and creates responses that are forwarded to the gRPC stream.
+// It also forwards control message like watch created and canceled.
+type serverWatchStream struct {
+ lg *zap.Logger
+ clusterID int64
+ memberID int64
+ maxRequestBytes int
+ sg etcdserver.RaftStatusGetter
+ watchable mvcc.WatchableKV
+ ag AuthGetter
+ gRPCStream pb.Watch_WatchServer // the gRPC stream connected to the client
+ watchStream mvcc.WatchStream // stream that delivers key change events
+ ctrlStream chan *pb.WatchResponse // chan for sending control responses such as watcher created and canceled
+
+ // mu protects progress, prevKV, fragment
+ mu sync.RWMutex
+ // tracks the watchID that stream might need to send progress to
+ // TODO: combine progress and prevKV into a single struct?
+ progress map[mvcc.WatchID]bool // watches for which the server periodically sends progress (heartbeat-like) notifications
+ prevKV map[mvcc.WatchID]bool // watches that want the previous key-value pair included in their events
+ fragment map[mvcc.WatchID]bool // watches whose responses are split into fragments when they exceed the size threshold
+ closec chan struct{}
+ wg sync.WaitGroup // waits for the send loop to finish
+}
+
+// Watch creates a watch stream for the client connection.
+func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
+ sws := serverWatchStream{
+ lg: ws.lg,
+ clusterID: ws.clusterID,
+ memberID: ws.memberID,
+ maxRequestBytes: ws.maxRequestBytes,
+ sg: ws.sg, // raft status getter
+ watchable: ws.watchable,
+ ag: ws.ag, // auth getter
+ gRPCStream: stream,
+ watchStream: ws.watchable.NewWatchStream(),
+ ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), // chan for control responses such as watcher created and canceled
+ progress: make(map[mvcc.WatchID]bool),
+ prevKV: make(map[mvcc.WatchID]bool),
+ fragment: make(map[mvcc.WatchID]bool),
+ closec: make(chan struct{}),
+ }
+
+ sws.wg.Add(1)
+ go func() {
+ sws.sendLoop() // sends change events back to the client; blocks until the stream closes
+ sws.wg.Done()
+ }()
+
+ errc := make(chan error, 1)
+ // Ideally recvLoop would also use sws.wg, but when stream.Context().Done() is closed the stream's Recv may keep blocking because it uses a different context, which would deadlock the call to sws.close().
+ go func() {
+ if rerr := sws.recvLoop(); rerr != nil {
+ if isClientCtxErr(stream.Context().Err(), rerr) {
+ sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr))
+ } else {
+ sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr))
+ }
+ errc <- rerr
+ }
+ }()
+ select {
+ case err = <-errc:
+ if err == context.Canceled {
+ err = rpctypes.ErrGRPCWatchCanceled
+ }
+ close(sws.ctrlStream)
+ case <-stream.Context().Done():
+ err = stream.Context().Err()
+ if err == context.Canceled {
+ err = rpctypes.ErrGRPCWatchCanceled
+ }
+ }
+
+ sws.close()
+ return err
+}
+
+func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
+ authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
+ if err != nil {
+ return false
+ }
+ if authInfo == nil {
+ // if auth is enabled, IsRangePermitted() can cause an error
+ authInfo = &auth.AuthInfo{}
+ }
+ return sws.ag.AuthStore().IsRangePermitted(authInfo, []byte(wcr.Key), []byte(wcr.RangeEnd)) == nil
+}
+
+// recvLoop receives watch requests, which may be create, cancel, or progress requests.
+func (sws *serverWatchStream) recvLoop() error {
+ for {
+ // the same connection can receive many different request messages
+ req, err := sws.gRPCStream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if req.WatchRequest_CreateRequest != nil { // create a watcher ✅
+ uv := &pb.WatchRequest_CreateRequest{}
+ uv = req.WatchRequest_CreateRequest
+ if uv.CreateRequest == nil {
+ continue
+ }
+
+ creq := uv.CreateRequest
+ if len(creq.Key) == 0 { // a
+ // \x00 is the smallest key
+ creq.Key = string([]byte{0})
+ }
+ if len(creq.RangeEnd) == 0 {
+ // force nil since watchstream.Watch distinguishes
+ // between nil and []byte{} for single key / >=
+ creq.RangeEnd = ""
+ }
+ if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
+ // support >= key queries
+ creq.RangeEnd = string([]byte{})
+ }
+ // permission check
+ if !sws.isWatchPermitted(creq) { // the request is not permitted
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: creq.WatchId,
+ Canceled: true,
+ Created: true,
+ CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
+ }
+
+ select {
+ case sws.ctrlStream <- wr:
+ continue
+ case <-sws.closec:
+ return nil
+ }
+ }
+
+ filters := FiltersFromRequest(creq) // extract the filter conditions from the watch create request
+
+ wsrev := sws.watchStream.Rev() // current revision of the KV store
+ rev := creq.StartRevision // revision to start watching from; defaults to the current revision when unset
+ if rev == 0 {
+ rev = wsrev + 1
+ }
+ id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), []byte(creq.Key), []byte(creq.RangeEnd), rev, filters...)
+ if err == nil {
+ sws.mu.Lock()
+ if creq.ProgressNotify { // false by default
+ sws.progress[id] = true
+ }
+ if creq.PrevKv { // false by default
+ sws.prevKV[id] = true
+ }
+ if creq.Fragment { // split large events into fragments
+ sws.fragment[id] = true
+ }
+ sws.mu.Unlock()
+ }
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(wsrev), //
+ WatchId: int64(id),
+ Created: true,
+ Canceled: err != nil,
+ }
+ if err != nil {
+ wr.CancelReason = err.Error()
+ }
+ select {
+ case sws.ctrlStream <- wr: // response to the client's watch create request
+ case <-sws.closec:
+ return nil
+ }
+ }
+ if req.WatchRequest_CancelRequest != nil { // cancel a watcher ✅
+ uv := &pb.WatchRequest_CancelRequest{}
+ uv = req.WatchRequest_CancelRequest
+ if uv.CancelRequest != nil {
+ id := uv.CancelRequest.WatchId
+ err := sws.watchStream.Cancel(mvcc.WatchID(id))
+ if err == nil {
+ sws.ctrlStream <- &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: id,
+ Canceled: true,
+ }
+ sws.mu.Lock()
+ delete(sws.progress, mvcc.WatchID(id))
+ delete(sws.prevKV, mvcc.WatchID(id))
+ delete(sws.fragment, mvcc.WatchID(id))
+ sws.mu.Unlock()
+ }
+ }
+ }
+ if req.WatchRequest_ProgressRequest != nil {
+ uv := &pb.WatchRequest_ProgressRequest{}
+ uv = req.WatchRequest_ProgressRequest
+ if uv.ProgressRequest != nil {
+ sws.ctrlStream <- &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: -1, // the progress response is not tied to a specific watcher
+ }
+ }
+ }
+ }
+}
+
+// sendLoop sends watch responses back over the gRPC stream.
+func (sws *serverWatchStream) sendLoop() {
+ // currently announced (active) watchers
+ ids := make(map[mvcc.WatchID]struct{})
+ // TODO: can the same stream carry several different watchers?
+ pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
+
+ interval := GetProgressReportInterval() // e.g. roughly 10m plus jitter, such as 10m44s
+ progressTicker := time.NewTicker(interval)
+
+ defer func() {
+ progressTicker.Stop()
+ }()
+
+ for {
+ select {
+ case wresp, ok := <-sws.watchStream.Chan(): // pull events from the watchStream channel and send them
+ if !ok {
+ return
+ }
+ evs := wresp.Events
+ events := make([]*mvccpb.Event, len(evs))
+ sws.mu.RLock()
+ needPrevKV := sws.prevKV[wresp.WatchID]
+ sws.mu.RUnlock()
+ for i := range evs {
+ events[i] = &evs[i]
+ if needPrevKV && !IsCreateEvent(evs[i]) {
+ opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
+ r, err := sws.watchable.Range(context.TODO(), []byte(evs[i].Kv.Key), nil, opt)
+ if err == nil && len(r.KVs) != 0 {
+ events[i].PrevKv = &(r.KVs[0])
+ }
+ }
+ }
+
+ canceled := wresp.CompactRevision != 0
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(wresp.Revision),
+ WatchId: int64(wresp.WatchID),
+ Events: events,
+ CompactRevision: wresp.CompactRevision,
+ Canceled: canceled,
+ }
+ _, okID := ids[wresp.WatchID]
+ if !okID { // this watch id has not been announced to the client yet
+ // buffer the response until the id has been announced
+ wrs := append(pending[wresp.WatchID], wr)
+ pending[wresp.WatchID] = wrs
+ continue
+ }
+
+ sws.mu.RLock()
+ fragmented, ok := sws.fragment[wresp.WatchID] // whether large responses should be split into fragments
+ sws.mu.RUnlock()
+
+ var serr error
+ if !fragmented && !ok {
+ serr = sws.gRPCStream.Send(wr)
+ } else {
+ serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send)
+ }
+
+ if serr != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) {
+ sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr))
+ } else {
+ sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr))
+ }
+ return
+ }
+
+ sws.mu.Lock()
+ if len(evs) > 0 && sws.progress[wresp.WatchID] {
+ // elide the next progress update if a key update has been sent
+ sws.progress[wresp.WatchID] = false
+ }
+ sws.mu.Unlock()
+
+ case c, ok := <-sws.ctrlStream: // control responses for the stream ✅
+ // control response to send back to the client
+ if !ok {
+ return // the channel was closed
+ }
+
+ if err := sws.gRPCStream.Send(c); err != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+ sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err))
+ } else {
+ sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err))
+ }
+ return
+ }
+
+ // track the watch id once it has been created
+ wid := mvcc.WatchID(c.WatchId) // the first watcher created on a stream gets id 0
+ if c.Canceled {
+ delete(ids, wid)
+ continue
+ }
+ if c.Created {
+ ids[wid] = struct{}{}
+ for _, v := range pending[wid] {
+ if err := sws.gRPCStream.Send(v); err != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+ sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err))
+ } else {
+ sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err))
+ }
+ return
+ }
+ }
+ delete(pending, wid)
+ }
+
+ case <-progressTicker.C: // periodically send progress notifications
+ sws.mu.Lock()
+ for id, ok := range sws.progress {
+ if ok {
+ sws.watchStream.RequestProgress(id)
+ }
+ sws.progress[id] = true
+ }
+ sws.mu.Unlock()
+
+ case <-sws.closec:
+ return
+ }
+ }
+}
+
+func sendFragments(wr *pb.WatchResponse, maxRequestBytes int, sendFunc func(*pb.WatchResponse) error) error {
+ // no need to fragment if total request size is smaller
+ // than max request limit or response contains only one event
+ if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
+ return sendFunc(wr)
+ }
+
+ ow := *wr
+ ow.Events = make([]*mvccpb.Event, 0)
+ ow.Fragment = true
+
+ var idx int
+ for {
+ cur := ow
+ for _, ev := range wr.Events[idx:] {
+ cur.Events = append(cur.Events, ev)
+ if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
+ cur.Events = cur.Events[:len(cur.Events)-1]
+ break
+ }
+ idx++
+ }
+ if idx == len(wr.Events) {
+ // last response has no more fragment
+ cur.Fragment = false
+ }
+ if err := sendFunc(&cur); err != nil {
+ return err
+ }
+ if !cur.Fragment {
+ break
+ }
+ }
+ return nil
+}
+
+// NewWatchServer creates a watch server; it runs once at server startup.
+func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
+ srv := &watchServer{
+ lg: s.Cfg.Logger,
+ clusterID: int64(s.Cluster().ID()),
+ memberID: int64(s.ID()),
+ maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),
+ sg: s,
+ watchable: s.Watchable(),
+ ag: s,
+ }
+ if srv.lg == nil {
+ srv.lg = zap.NewNop()
+ }
+ if s.Cfg.WatchProgressNotifyInterval > 0 {
+ if s.Cfg.WatchProgressNotifyInterval < minWatchProgressInterval {
+ srv.lg.Warn("adjusting watch progress notify interval to minimum period", zap.Duration("min-watch-progress-notify-interval", minWatchProgressInterval))
+ s.Cfg.WatchProgressNotifyInterval = minWatchProgressInterval
+ }
+ SetProgressReportInterval(s.Cfg.WatchProgressNotifyInterval)
+ }
+ return srv
+}
+
+func GetProgressReportInterval() time.Duration {
+ progressReportIntervalMu.RLock()
+ interval := progressReportInterval
+ progressReportIntervalMu.RUnlock()
+
+ // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not
+ // send progress notifications to watchers around the same time even when watchers
+ // are created around the same time (which is common when a client restarts itself).
+ jitter := time.Duration(rand.Int63n(int64(interval) / 10))
+
+ return interval + jitter
+}
+
+func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
+ filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
+ for _, ft := range creq.Filters {
+ switch ft {
+ case pb.WatchCreateRequest_NOPUT:
+ filters = append(filters, filterNoPut)
+ case pb.WatchCreateRequest_NODELETE:
+ filters = append(filters, filterNoDelete)
+ default:
+ }
+ }
+ return filters
+}
+
+// newResponseHeader builds a response header at the given revision.
+func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(sws.clusterID),
+ MemberId: uint64(sws.memberID),
+ Revision: rev,
+ RaftTerm: sws.sg.Term(),
+ }
+}
+
+func IsCreateEvent(e mvccpb.Event) bool {
+ return e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+func (sws *serverWatchStream) close() {
+ sws.watchStream.Close()
+ close(sws.closec)
+ sws.wg.Wait()
+}
+
+func filterNoDelete(e mvccpb.Event) bool {
+ return e.Type == mvccpb.DELETE
+}
+
+func filterNoPut(e mvccpb.Event) bool {
+ return e.Type == mvccpb.PUT
+}
diff --git a/etcd/etcdserver/api/v3rpc/util.go b/etcd/etcdserver/api/v3rpc/util.go
new file mode 100644
index 00000000000..ed3dcf76aa9
--- /dev/null
+++ b/etcd/etcdserver/api/v3rpc/util.go
@@ -0,0 +1,148 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+var toGRPCErrorMap = map[error]error{
+ membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDExists: rpctypes.ErrGRPCMemberExist,
+ membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist,
+ membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner,
+ membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners,
+ etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
+ etcdserver.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady,
+
+ mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted,
+ mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev,
+ etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
+ etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace,
+ etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
+
+ etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
+ etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
+ etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
+ etcdserver.ErrStopped: rpctypes.ErrGRPCStopped,
+ etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
+ etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
+ etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
+ etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
+ etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
+ etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
+ etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee,
+
+ etcdserver.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable,
+ etcdserver.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat,
+ etcdserver.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion,
+ etcdserver.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess,
+ etcdserver.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade,
+
+ lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound,
+ lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist,
+ lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge,
+
+ auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist,
+ auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist,
+ auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist,
+ auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty,
+ auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound,
+ auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist,
+ auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
+ auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty,
+ auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
+ auth.ErrPermissionNotGiven: rpctypes.ErrGRPCPermissionNotGiven,
+ auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
+ auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
+ auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
+ auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
+ auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
+ auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
+ auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision,
+
+ // In sync with status.FromContextError
+ context.Canceled: rpctypes.ErrGRPCCanceled,
+ context.DeadlineExceeded: rpctypes.ErrGRPCDeadlineExceeded,
+}
+
+func togRPCError(err error) error {
+ // let the gRPC layer convert to codes.Canceled, codes.DeadlineExceeded
+ if err == context.Canceled || err == context.DeadlineExceeded {
+ return err
+ }
+ grpcErr, ok := toGRPCErrorMap[err]
+ if !ok {
+ return status.Error(codes.Unknown, err.Error())
+ }
+ return grpcErr
+}
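
An illustrative sketch (not part of the original change) of how the mapping above is used: an internal error surfaces from storage or membership code, and togRPCError translates it into the public gRPC error before it is returned to the client; anything without an entry in toGRPCErrorMap falls back to codes.Unknown. The sketchTranslate name is made up.

package v3rpc

import (
	"errors"

	"github.com/ls-2018/etcd_cn/etcd/mvcc"
	"github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
)

// sketchTranslate is an illustrative sketch, not part of the original change.
func sketchTranslate() {
	// a Range at an already-compacted revision maps to the public gRPC error
	grpcErr := togRPCError(mvcc.ErrCompacted)
	_ = grpcErr == rpctypes.ErrGRPCCompacted // true

	// errors without an entry in toGRPCErrorMap become codes.Unknown
	_ = togRPCError(errors.New("boom"))
}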
+
+func isClientCtxErr(ctxErr error, err error) bool {
+ if ctxErr != nil {
+ return true
+ }
+
+ ev, ok := status.FromError(err)
+ if !ok {
+ return false
+ }
+
+ switch ev.Code() {
+ case codes.Canceled, codes.DeadlineExceeded:
+ // client-side context cancel or deadline exceeded
+ // "rpc error: code = Canceled desc = context canceled"
+ // "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
+ return true
+ case codes.Unavailable:
+ msg := ev.Message()
+ // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected")
+ // "rpc error: code = Unavailable desc = client disconnected"
+ if msg == "client disconnected" {
+ return true
+ }
+ // "grpc/transport.ClientTransport.CloseStream" on canceled streams
+ // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL")
+ if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") {
+ return true
+ }
+ }
+ return false
+}
+
+// Since v3.4, learners are allowed to serve serializable reads and endpoint status requests.
+func isRPCSupportedForLearner(req interface{}) bool {
+ switch r := req.(type) {
+ case *pb.StatusRequest:
+ return true
+ case *pb.RangeRequest:
+ return r.Serializable
+ default:
+ return false
+ }
+}
diff --git a/etcd/etcdserver/api_downgrade.go b/etcd/etcdserver/api_downgrade.go
new file mode 100644
index 00000000000..f63032c0f4d
--- /dev/null
+++ b/etcd/etcdserver/api_downgrade.go
@@ -0,0 +1,56 @@
+package etcdserver
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "go.uber.org/zap"
+)
+
+func (s *EtcdServer) DowngradeEnabledHandler() http.Handler {
+ return &downgradeEnabledHandler{
+ lg: s.Logger(),
+ cluster: s.cluster,
+ server: s,
+ }
+}
+
+type downgradeEnabledHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+ server *EtcdServer
+}
+
+func (h *downgradeEnabledHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if r.URL.Path != DowngradeEnabledPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), h.server.Cfg.ReqTimeout())
+ defer cancel()
+
+ // serve with linearized downgrade info
+ if err := h.server.linearizeReadNotify(ctx); err != nil {
+ http.Error(w, fmt.Sprintf("failed linearized read: %v", err),
+ http.StatusInternalServerError)
+ return
+ }
+ enabled := h.server.DowngradeInfo().Enabled
+ w.Header().Set("Content-Type", "text/plain")
+ w.Write([]byte(strconv.FormatBool(enabled)))
+}
+
+func (s *EtcdServer) DowngradeInfo() *membership.DowngradeInfo { return s.cluster.DowngradeInfo() }
diff --git a/etcd/etcdserver/api_hashkv.go b/etcd/etcdserver/api_hashkv.go
new file mode 100644
index 00000000000..48bbe166806
--- /dev/null
+++ b/etcd/etcdserver/api_hashkv.go
@@ -0,0 +1,120 @@
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "go.uber.org/zap"
+)
+
+const PeerHashKVPath = "/members/hashkv"
+
+type hashKVHandler struct {
+ lg *zap.Logger
+ server *EtcdServer
+}
+
+func (s *EtcdServer) HashKVHandler() http.Handler {
+ return &hashKVHandler{lg: s.Logger(), server: s}
+}
+
+func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+ if r.URL.Path != PeerHashKVPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+
+ defer r.Body.Close()
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "读取body失败", http.StatusBadRequest)
+ return
+ }
+
+ req := &pb.HashKVRequest{}
+ if err := json.Unmarshal(b, req); err != nil {
+ h.lg.Warn("反序列化请求数据失败", zap.Error(err))
+ http.Error(w, "反序列化请求数据失败", http.StatusBadRequest)
+ return
+ }
+ hash, rev, compactRev, err := h.server.KV().HashByRev(req.Revision)
+ if err != nil {
+ h.lg.Warn(
+ "获取hash值失败",
+ zap.Int64("requested-revision", req.Revision),
+ zap.Error(err),
+ )
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: hash, CompactRevision: compactRev}
+ respBytes, err := json.Marshal(resp)
+ if err != nil {
+ h.lg.Warn("failed to marshal hashKV response", zap.Error(err))
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.server.Cluster().ID().String())
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(respBytes)
+}
+
+// getPeerHashKVHTTP fetches the hash of the KV store at the given revision via an HTTP call to the given URL.
+func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int64) (*pb.HashKVResponse, error) {
+ cc := &http.Client{Transport: s.peerRt}
+ hashReq := &pb.HashKVRequest{Revision: rev} // revision is the key-value store revision to hash
+ hashReqBytes, err := json.Marshal(hashReq)
+ if err != nil {
+ return nil, err
+ }
+ requestUrl := url + PeerHashKVPath
+ req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes))
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ req.Header.Set("Content-Type", "application/json")
+ req.Cancel = ctx.Done()
+
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == http.StatusBadRequest {
+ if strings.Contains(string(b), mvcc.ErrCompacted.Error()) {
+ return nil, rpctypes.ErrCompacted
+ }
+ if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) {
+ return nil, rpctypes.ErrFutureRev
+ }
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unknown error: %s", string(b))
+ }
+
+ hashResp := &pb.HashKVResponse{}
+ if err := json.Unmarshal(b, hashResp); err != nil {
+ return nil, err
+ }
+ return hashResp, nil
+}
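
An illustrative sketch (not part of the original change) of the comparison the corruption check builds on top of getPeerHashKVHTTP: hash the local KV store at a revision, fetch a peer's hash at the same revision, and flag any mismatch. The method name and the peer URL are made-up examples.

package etcdserver

import (
	"context"
	"fmt"
)

// sketchCompareHashWithPeer is an illustrative sketch, not part of the original change.
func (s *EtcdServer) sketchCompareHashWithPeer(ctx context.Context, rev int64) error {
	localHash, _, _, err := s.KV().HashByRev(rev)
	if err != nil {
		return err
	}
	peerResp, err := s.getPeerHashKVHTTP(ctx, "https://10.0.0.2:2380", rev)
	if err != nil {
		return err
	}
	if peerResp.Hash != localHash {
		return fmt.Errorf("hash mismatch at revision %d: local=%d peer=%d", rev, localHash, peerResp.Hash)
	}
	return nil
}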
diff --git a/etcd/etcdserver/api_lease.go b/etcd/etcdserver/api_lease.go
new file mode 100644
index 00000000000..04247950e11
--- /dev/null
+++ b/etcd/etcdserver/api_lease.go
@@ -0,0 +1,15 @@
+package etcdserver
+
+import (
+ "net/http"
+
+ "github.com/ls-2018/etcd_cn/etcd/lease/leasehttp"
+)
+
+func (s *EtcdServer) LeaseHandler() http.Handler {
+ if s.lessor == nil {
+ return nil
+ }
+ return leasehttp.NewHandler(s.lessor, s.ApplyWait)
+}
+func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
diff --git a/etcd/etcdserver/api_raft_status_getter.go b/etcd/etcdserver/api_raft_status_getter.go
new file mode 100644
index 00000000000..f0989fb971a
--- /dev/null
+++ b/etcd/etcdserver/api_raft_status_getter.go
@@ -0,0 +1,23 @@
+package etcdserver
+
+import "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+
+type RaftStatusGetter interface {
+ ID() types.ID
+ Leader() types.ID
+ CommittedIndex() uint64
+ AppliedIndex() uint64
+ Term() uint64
+}
+
+func (s *EtcdServer) ID() types.ID { return s.id }
+
+func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) }
+
+func (s *EtcdServer) Lead() uint64 { return s.getLead() }
+
+func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() }
+
+func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() }
+
+func (s *EtcdServer) Term() uint64 { return s.getTerm() }
diff --git a/etcd/etcdserver/apply_auth.go b/etcd/etcdserver/apply_auth.go
new file mode 100644
index 00000000000..c8644dd09c1
--- /dev/null
+++ b/etcd/etcdserver/apply_auth.go
@@ -0,0 +1,248 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "sync"
+
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+)
+
+type authApplierV3 struct {
+ applierV3 // applierV3backend
+ as auth.AuthStore // supplies authentication info and permission checks during the apply (inner) loop
+ lessor lease.Lessor // lease manager
+ // mu serializes Apply so that user isn't corrupted and so that
+ // serialized requests don't leak data from TOCTOU errors
+ mu sync.Mutex
+ authInfo auth.AuthInfo
+}
+
+func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
+ return &authApplierV3{applierV3: base, as: as, lessor: lessor}
+}
+
+func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
+ aa.mu.Lock()
+ defer aa.mu.Unlock()
+ if r.Header != nil {
+ // backward-compatible with pre-3.0 releases, where internalRaftRequest carried no header
+ aa.authInfo.Username = r.Header.Username
+ aa.authInfo.Revision = r.Header.AuthRevision
+ }
+ if needAdminPermission(r) {
+ if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &applyResult{err: err}
+ }
+ }
+ ret := aa.applierV3.Apply(r, shouldApplyV3)
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return ret
+}
+
+func (aa *authApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(r.Key)); err != nil {
+ return nil, nil, err
+ }
+
+ if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
+ // The specified lease is already attached to a key that cannot
+ // be written by this user. It means the user cannot revoke the
+ // lease, so attaching the lease to the newly written key should
+ // be forbidden.
+ return nil, nil, err
+ }
+
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, []byte(r.Key), nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return aa.applierV3.Put(ctx, txn, r)
+}
+
+func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ if err := aa.as.IsRangePermitted(&aa.authInfo, []byte(r.Key), []byte(r.RangeEnd)); err != nil {
+ return nil, err
+ }
+ return aa.applierV3.Range(ctx, txn, r)
+}
+
+func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, []byte(r.Key), []byte(r.RangeEnd)); err != nil {
+ return nil, err
+ }
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, []byte(r.Key), []byte(r.RangeEnd)) // {a,b true}
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return aa.applierV3.DeleteRange(txn, r)
+}
+
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
+ for _, requ := range reqs {
+ if requ.RequestOp_RequestRange != nil {
+ tv := requ.RequestOp_RequestRange
+ if tv.RequestRange == nil {
+ continue
+ }
+ if err := as.IsRangePermitted(ai, []byte(tv.RequestRange.Key), []byte(tv.RequestRange.RangeEnd)); err != nil {
+ return err
+ }
+
+ }
+ if requ.RequestOp_RequestPut != nil {
+ tv := requ.RequestOp_RequestPut
+ if tv.RequestPut == nil {
+ continue
+ }
+
+ if err := as.IsPutPermitted(ai, []byte(tv.RequestPut.Key)); err != nil {
+ return err
+ }
+
+ }
+ if requ.RequestOp_RequestDeleteRange != nil {
+ tv := requ.RequestOp_RequestDeleteRange
+ if tv.RequestDeleteRange == nil {
+ continue
+ }
+
+ if tv.RequestDeleteRange.PrevKv {
+ err := as.IsRangePermitted(ai, []byte(tv.RequestDeleteRange.Key), []byte(tv.RequestDeleteRange.RangeEnd))
+ if err != nil {
+ return err
+ }
+ }
+
+ err := as.IsDeleteRangePermitted(ai, []byte(tv.RequestDeleteRange.Key), []byte(tv.RequestDeleteRange.RangeEnd))
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+ for _, c := range rt.Compare {
+ if err := as.IsRangePermitted(ai, []byte(c.Key), []byte(c.RangeEnd)); err != nil {
+ return err
+ }
+ }
+ if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+ return err
+ }
+ return checkTxnReqsPermission(as, ai, rt.Failure)
+}
+
+func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+ return nil, nil, err
+ }
+ return aa.applierV3.Txn(ctx, rt)
+}
+
+func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil { // make sure the caller may write every key attached to this lease
+ return nil, err
+ }
+ return aa.applierV3.LeaseRevoke(lc)
+}
+
+// checkLeasePuts verifies the caller has put permission on every key attached to the lease.
+func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
+ lease := aa.lessor.Lookup(leaseID)
+ if lease != nil {
+ for _, key := range lease.Keys() {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && r.Name != aa.authInfo.Username {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthUserGetResponse{}, err
+ }
+
+ return aa.applierV3.UserGet(r)
+}
+
+func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && !aa.as.UserHasRole(aa.authInfo.Username, r.Role) {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthRoleGetResponse{}, err
+ }
+
+ return aa.applierV3.RoleGet(r)
+}
+
+func needAdminPermission(r *pb.InternalRaftRequest) bool {
+ switch {
+ case r.AuthEnable != nil:
+ return true
+ case r.AuthDisable != nil:
+ return true
+ case r.AuthStatus != nil:
+ return true
+ case r.AuthUserAdd != nil:
+ return true
+ case r.AuthUserDelete != nil:
+ return true
+ case r.AuthUserChangePassword != nil:
+ return true
+ case r.AuthUserGrantRole != nil:
+ return true
+ case r.AuthUserRevokeRole != nil:
+ return true
+ case r.AuthRoleAdd != nil:
+ return true
+ case r.AuthRoleGrantPermission != nil:
+ return true
+ case r.AuthRoleRevokePermission != nil:
+ return true
+ case r.AuthRoleDelete != nil:
+ return true
+ case r.AuthUserList != nil:
+ return true
+ case r.AuthRoleList != nil:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/server/etcdserver/apply_v2.go b/etcd/etcdserver/apply_v2.go
similarity index 80%
rename from server/etcdserver/apply_v2.go
rename to etcd/etcdserver/apply_v2.go
index c9e4c3e87b0..9f019cca0fc 100644
--- a/server/etcdserver/apply_v2.go
+++ b/etcd/etcdserver/apply_v2.go
@@ -16,26 +16,18 @@ package etcdserver
import (
"encoding/json"
- "fmt"
"path"
"time"
- "unicode/utf8"
"github.com/coreos/go-semver/semver"
-
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/server/v3/etcdserver/api"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/etcdserver/errors"
- "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
"go.uber.org/zap"
)
-const v2Version = "v2"
-
-// ApplierV2 is the interface for processing V2 raft messages
type ApplierV2 interface {
Delete(r *RequestV2) Response
Post(r *RequestV2) Response
@@ -100,7 +92,7 @@ func (a *applierV2store) Put(r *RequestV2, shouldApplyV3 membership.ShouldApplyV
// TODO remove v2 version set to avoid the conflict between v2 and v3 in etcd 3.6
if r.Path == membership.StoreClusterVersionKey() {
if a.cluster != nil {
- // persist to backend given v2store can be very stale
+ // persist to backend given v2store can be very stale
a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability, shouldApplyV3)
}
return Response{}
@@ -121,20 +113,6 @@ func (a *applierV2store) Sync(r *RequestV2) Response {
// applyV2Request interprets r as a call to v2store.X
// and returns a Response interpreted from v2store.Event
func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.ShouldApplyV3) (resp Response) {
- stringer := panicAlternativeStringer{
- stringer: r,
- alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) },
- }
- defer func(start time.Time) {
- if !utf8.ValidString(r.Method) {
- s.lg.Info("method is not valid utf-8")
- return
- }
- success := resp.Err == nil
- txn.ApplySecObserve(v2Version, r.Method, success, time.Since(start))
- txn.WarnOfExpensiveRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, stringer, nil, nil)
- }(time.Now())
-
switch r.Method {
case "POST":
return s.applyV2.Post(r)
@@ -147,8 +125,8 @@ func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.Shoul
case "SYNC":
return s.applyV2.Sync(r)
default:
- // This should never be reached, but just in case:
- return Response{Err: errors.ErrUnknownMethod}
+ // This should never be reached, but just in case:
+ return Response{Err: ErrUnknownMethod}
}
}
diff --git a/etcd/etcdserver/apply_v3.go b/etcd/etcdserver/apply_v3.go
new file mode 100644
index 00000000000..1141d196a2b
--- /dev/null
+++ b/etcd/etcdserver/apply_v3.go
@@ -0,0 +1,1025 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/membershippb"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+)
+
+type applyResult struct {
+ resp proto.Message
+ err error
+ physc <-chan struct{} // closed once the change has been written to both disk and memory
+ trace *traceutil.Trace
+}
+
+// applierV3Internal applies internal v3 raft requests.
+type applierV3Internal interface {
+ ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3)
+ ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3)
+ DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3)
+}
+
+type applierV3 interface {
+ Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult
+ Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
+ Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
+ DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
+ Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error)
+ Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error)
+ LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+ LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+ LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error)
+ Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
+ Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
+ AuthEnable() (*pb.AuthEnableResponse, error)
+ AuthDisable() (*pb.AuthDisableResponse, error)
+ AuthStatus() (*pb.AuthStatusResponse, error)
+ UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+ RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error
+
+type applierV3backend struct {
+ s *EtcdServer
+ checkPut checkReqFunc
+ checkRange checkReqFunc
+}
+
+func (s *EtcdServer) newApplierV3Backend() applierV3 {
+ base := &applierV3backend{s: s}
+ base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error {
+ return base.checkRequestPut(rv, req)
+ }
+ base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error {
+ return base.checkRequestRange(rv, req)
+ }
+ return base
+}
+
+func (s *EtcdServer) newApplierV3Internal() applierV3Internal {
+ base := &applierV3backend{s: s}
+ return base
+}
+
+func (s *EtcdServer) newApplierV3() applierV3 {
+ return newAuthApplierV3(s.AuthStore(), newQuotaApplierV3(s, s.newApplierV3Backend()), s.lessor)
+}
+
+// Put contains the logic that actually stores the key-value pair in the application after the request has passed through raft.
+func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
+ resp = &pb.PutResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ trace = traceutil.Get(ctx)
+ // create a put trace if the trace in the context is empty
+ if trace.IsEmpty {
+ trace = traceutil.New("put",
+ a.s.Logger(),
+ traceutil.Field{Key: "key", Value: string([]byte(p.Key))},
+ traceutil.Field{Key: "req_size", Value: p.Size()},
+ )
+ }
+ val, leaseID := p.Value, lease.LeaseID(p.Lease)
+ if txn == nil { // no write transaction supplied; open one
+ if leaseID != lease.NoLease {
+ if l := a.s.lessor.Lookup(leaseID); l == nil { // look up the lease
+ return nil, nil, lease.ErrLeaseNotFound
+ }
+ }
+ // watchableStoreTxnWrite[storeTxnWrite]
+ txn = a.s.KV().Write(trace)
+ defer txn.End()
+ }
+
+ var rr *mvcc.RangeResult
+ if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
+ trace.StepWithFunction(func() {
+ rr, err = txn.Range(context.TODO(), []byte(p.Key), nil, mvcc.RangeOptions{})
+ }, "得到之前的kv对")
+
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ if p.IgnoreValue || p.IgnoreLease {
+ if rr == nil || len(rr.KVs) == 0 {
+ // ignore_{lease,value} flag expects previous key-value pair
+ return nil, nil, ErrKeyNotFound
+ }
+ }
+ if p.IgnoreValue {
+ val = rr.KVs[0].Value
+ }
+ if p.IgnoreLease {
+ leaseID = lease.LeaseID(rr.KVs[0].Lease)
+ }
+ if p.PrevKv {
+ if rr != nil && len(rr.KVs) != 0 {
+ resp.PrevKv = &rr.KVs[0]
+ }
+ }
+ resp.Header.Revision = txn.Put([]byte(p.Key), []byte(val), leaseID)
+ trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
+ return resp, trace, nil
+}
+
+func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ resp := &pb.DeleteRangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ end := mkGteRange([]byte(dr.RangeEnd))
+
+ if txn == nil {
+ txn = a.s.kv.Write(traceutil.TODO()) // open a write transaction
+ defer txn.End()
+ }
+
+ if dr.PrevKv { //
+ rr, err := txn.Range(context.TODO(), []byte(dr.Key), end, mvcc.RangeOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if rr != nil {
+ resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ resp.PrevKvs[i] = &rr.KVs[i]
+ }
+ }
+ }
+ // storeTxnWrite
+ resp.Deleted, resp.Header.Revision = txn.DeleteRange([]byte(dr.Key), end)
+ return resp, nil
+}
+
+func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ trace := traceutil.Get(ctx)
+ if trace.IsEmpty {
+ trace = traceutil.New("transaction", a.s.Logger())
+ ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
+ }
+ isWrite := !isTxnReadonly(rt)
+
+ // When the transaction contains write operations, we use ReadTx instead of
+ // ConcurrentReadTx to avoid extra overhead of copying buffer.
+ var txn mvcc.TxnWrite
+ if isWrite && a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer {
+ txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.SharedBufReadTxMode, trace))
+ } else {
+ txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.ConcurrentReadTxMode, trace))
+ }
+
+ var txnPath []bool
+ trace.StepWithFunction(
+ func() {
+ txnPath = compareToPath(txn, rt)
+ },
+ "compare",
+ )
+
+ if isWrite {
+ trace.AddField(traceutil.Field{Key: "read_only", Value: false})
+ if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil {
+ txn.End()
+ return nil, nil, err
+ }
+ }
+ if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil {
+ txn.End()
+ return nil, nil, err
+ }
+ trace.Step("check requests")
+ txnResp, _ := newTxnResp(rt, txnPath)
+
+ // When executing mutable txn ops, etcd must hold the txn lock so
+ // readers do not see any intermediate results. Since writes are
+ // serialized on the raft loop, the revision in the read view will
+ // be the revision of the write txn.
+ if isWrite {
+ txn.End()
+ txn = a.s.KV().Write(trace)
+ }
+ a.applyTxn(ctx, txn, rt, txnPath, txnResp)
+ rev := txn.Rev()
+ if len(txn.Changes()) != 0 {
+ rev++
+ }
+ txn.End()
+
+ txnResp.Header.Revision = rev
+ trace.AddField(
+ traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)},
+ traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision},
+ )
+ return txnResp, trace, nil
+}
+
+// newTxnResp allocates a txn response for a txn request given a path.
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ resps := make([]*pb.ResponseOp, len(reqs))
+ txnResp = &pb.TxnResponse{
+ Responses: resps,
+ Succeeded: txnPath[0],
+ Header: &pb.ResponseHeader{},
+ }
+ for i, req := range reqs {
+ if req.RequestOp_RequestRange != nil {
+ resps[i] = &pb.ResponseOp{ResponseOp_ResponseRange: &pb.ResponseOp_ResponseRange{}}
+ }
+ if req.RequestOp_RequestPut != nil {
+ resps[i] = &pb.ResponseOp{ResponseOp_ResponsePut: &pb.ResponseOp_ResponsePut{}}
+ }
+ if req.RequestOp_RequestDeleteRange != nil {
+ resps[i] = &pb.ResponseOp{ResponseOp_ResponseDeleteRange: &pb.ResponseOp_ResponseDeleteRange{}}
+ }
+ if req.RequestOp_RequestTxn != nil {
+ resp, txns := newTxnResp(req.RequestOp_RequestTxn.RequestTxn, txnPath[1:])
+ resps[i] = &pb.ResponseOp{ResponseOp_ResponseTxn: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
+ txnPath = txnPath[1+txns:]
+ txnCount += txns + 1
+ }
+
+ }
+ return txnResp, txnCount
+}
+
+func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
+ txnPath := make([]bool, 1)
+ ops := rt.Success
+ if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
+ ops = rt.Failure
+ }
+ for _, op := range ops {
+ tv := op.RequestOp_RequestTxn
+ if tv == nil || tv.RequestTxn == nil {
+ continue
+ }
+
+ txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
+ }
+ return txnPath
+}
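
An illustrative sketch (not part of the original change) of what compareToPath produces: one boolean per transaction, outermost first, recording which branch (Success or Failure) each txn will take. The field names follow this fork's flattened oneof style used above; nil is passed as the ReadView only because neither txn carries compares, so the view is never dereferenced.

package etcdserver

import (
	pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)

// sketchTxnPath is an illustrative sketch, not part of the original change.
func sketchTxnPath() []bool {
	inner := &pb.TxnRequest{} // no compares, so its branch decision is true
	outer := &pb.TxnRequest{
		Success: []*pb.RequestOp{
			{RequestOp_RequestTxn: &pb.RequestOp_RequestTxn{RequestTxn: inner}},
		},
	}
	// => []bool{true, true}: outer takes the Success branch, and so does the nested txn
	return compareToPath(nil, outer)
}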
+
+func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
+ for _, c := range cmps {
+ if !applyCompare(rv, c) {
+ return false
+ }
+ }
+ return true
+}
+
+// applyCompare applies the compare request.
+// If the comparison succeeds, it returns true. Otherwise, returns false.
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
+ // TODO: possible optimizations
+ // * chunk reads for large ranges to conserve memory
+ // * rewrite rules for common patterns:
+ // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
+ // * caching
+ rr, err := rv.Range(context.TODO(), []byte(c.Key), mkGteRange([]byte(c.RangeEnd)), mvcc.RangeOptions{})
+ if err != nil {
+ return false
+ }
+ if len(rr.KVs) == 0 {
+ if c.Target == pb.Compare_VALUE {
+ // Always fail if comparing a value on a key/keys that doesn't exist;
+ // nil == empty string in grpc; no way to represent missing value
+ return false
+ }
+ return compareKV(c, mvccpb.KeyValue{})
+ }
+ for _, kv := range rr.KVs {
+ if !compareKV(c, kv) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
+ var result int
+ rev := int64(0)
+ switch c.Target {
+ case pb.Compare_VALUE:
+ v := []byte{}
+
+ if c.Compare_Value != nil {
+ v = []byte(c.Compare_Value.Value)
+ }
+
+ result = bytes.Compare([]byte(ckv.Value), v)
+ case pb.Compare_CREATE:
+ if c.Compare_CreateRevision != nil {
+ rev = c.Compare_CreateRevision.CreateRevision
+ }
+ result = compareInt64(ckv.CreateRevision, rev)
+ case pb.Compare_MOD:
+ if c.Compare_ModRevision != nil {
+ rev = c.Compare_ModRevision.ModRevision
+ }
+ result = compareInt64(ckv.ModRevision, rev)
+ case pb.Compare_VERSION:
+ if c.Compare_Version != nil {
+ rev = c.Compare_Version.Version
+ }
+ result = compareInt64(ckv.Version, rev)
+ case pb.Compare_LEASE:
+ if c.Compare_Lease != nil {
+ rev = c.Compare_Lease.Lease
+ }
+ result = compareInt64(ckv.Lease, rev)
+ }
+ switch c.Result {
+ case pb.Compare_EQUAL:
+ return result == 0
+ case pb.Compare_NOT_EQUAL:
+ return result != 0
+ case pb.Compare_GREATER:
+ return result > 0
+ case pb.Compare_LESS:
+ return result < 0
+ }
+ return true
+}
+
+func (a *applierV3backend) applyTxn(ctx context.Context, txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) {
+ trace := traceutil.Get(ctx)
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+
+ lg := a.s.Logger()
+ for i, req := range reqs {
+
+ if req.RequestOp_RequestRange != nil {
+ respi := tresp.Responses[i].ResponseOp_ResponseRange
+ tv := req.RequestOp_RequestRange
+ trace.StartSubTrace(
+ traceutil.Field{Key: "req_type", Value: "range"},
+ traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)},
+ traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)})
+ resp, err := a.Range(ctx, txn, tv.RequestRange)
+ if err != nil {
+ lg.Panic("unexpected error during txn", zap.Error(err))
+ }
+ respi.ResponseRange = resp
+ trace.StopSubTrace()
+
+ }
+ if req.RequestOp_RequestPut != nil {
+ respi := tresp.Responses[i].ResponseOp_ResponsePut
+ tv := req.RequestOp_RequestPut
+ trace.StartSubTrace(
+ traceutil.Field{Key: "req_type", Value: "put"},
+ traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)},
+ traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()})
+ resp, _, err := a.Put(ctx, txn, tv.RequestPut)
+ if err != nil {
+ lg.Panic("unexpected error during txn", zap.Error(err))
+ }
+ respi.ResponsePut = resp
+ trace.StopSubTrace()
+ }
+
+ if req.RequestOp_RequestDeleteRange != nil {
+ respi := tresp.Responses[i].ResponseOp_ResponseDeleteRange
+ tv := req.RequestOp_RequestDeleteRange
+ resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
+ if err != nil {
+ lg.Panic("unexpected error during txn", zap.Error(err))
+ }
+ respi.ResponseDeleteRange = resp
+ }
+ if req.RequestOp_RequestTxn != nil {
+ resp := tresp.Responses[i].ResponseOp_ResponseTxn.ResponseTxn
+ tv := req.RequestOp_RequestTxn
+ applyTxns := a.applyTxn(ctx, txn, tv.RequestTxn, txnPath[1:], resp)
+ txns += applyTxns + 1
+ txnPath = txnPath[applyTxns+1:]
+ }
+
+ }
+ return txns
+}
+
+// Compaction removes historical key-value events up to the given revision.
+func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ resp := &pb.CompactionResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ trace := traceutil.New("compact",
+ a.s.Logger(),
+ traceutil.Field{Key: "revision", Value: compaction.Revision},
+ )
+
+ ch, err := a.s.KV().Compact(trace, compaction.Revision)
+ if err != nil {
+ return nil, ch, nil, err
+ }
+ // get the current revision; which key is ranged does not matter
+ rr, _ := a.s.KV().Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{})
+ resp.Header.Revision = rr.Rev
+ return resp, ch, trace, err
+}
+
+func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ a.s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Ver)), api.UpdateCapability, shouldApplyV3)
+}
+
+func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ a.s.cluster.UpdateAttributes(
+ types.ID(r.Member_ID),
+ membership.Attributes{
+ Name: r.MemberAttributes.Name,
+ ClientURLs: r.MemberAttributes.ClientUrls,
+ },
+ shouldApplyV3,
+ )
+}
+
+func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ d := membership.DowngradeInfo{Enabled: false}
+ if r.Enabled {
+ d = membership.DowngradeInfo{Enabled: true, TargetVersion: r.Ver}
+ }
+ a.s.cluster.SetDowngradeInfo(&d, shouldApplyV3)
+}
+
+func (a *quotaApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ ok := a.q.Available(rt)
+ resp, trace, err := a.applierV3.Txn(ctx, rt)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, trace, err
+}
+
+func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) {
+ txnCount := 0
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ for _, req := range reqs {
+ // tv, ok := req.Request.(*pb.RequestOp_RequestTxn)
+ tv := req.RequestOp_RequestTxn
+ if req.RequestOp_RequestTxn != nil && req.RequestOp_RequestTxn.RequestTxn != nil {
+ txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f)
+ if err != nil {
+ return 0, err
+ }
+ txnCount += txns + 1
+ txnPath = txnPath[txns+1:]
+ continue
+ }
+ if err := f(rv, req); err != nil {
+ return 0, err
+ }
+ }
+ return txnCount, nil
+}
+
+func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
+ if reqOp.RequestOp_RequestPut == nil {
+ return nil
+ }
+ if reqOp.RequestOp_RequestPut.RequestPut == nil {
+ return nil
+ }
+
+ req := reqOp.RequestOp_RequestPut.RequestPut
+ if req.IgnoreValue || req.IgnoreLease {
+ // expects previous key-value, error if not exist
+ rr, err := rv.Range(context.TODO(), []byte(req.Key), nil, mvcc.RangeOptions{})
+ if err != nil {
+ return err
+ }
+ if rr == nil || len(rr.KVs) == 0 {
+ return ErrKeyNotFound
+ }
+ }
+ if lease.LeaseID(req.Lease) != lease.NoLease {
+ if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
+ return lease.ErrLeaseNotFound
+ }
+ }
+ return nil
+}
+
+func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
+ if reqOp.RequestOp_RequestRange == nil {
+ return nil
+ }
+ if reqOp.RequestOp_RequestRange.RequestRange == nil {
+ return nil
+ }
+
+ req := reqOp.RequestOp_RequestRange.RequestRange
+ switch {
+ case req.Revision == 0:
+ return nil
+ case req.Revision > rv.Rev():
+ return mvcc.ErrFutureRev
+ case req.Revision < rv.FirstRev():
+ return mvcc.ErrCompacted
+ }
+ return nil
+}
+
+func noSideEffect(r *pb.InternalRaftRequest) bool {
+ return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil
+}
+
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
+ f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+ j := 0
+ for i := 0; i < len(ops); i++ {
+ if ops[i].RequestOp_RequestRange != nil {
+ continue
+ }
+ ops[j] = ops[i]
+ j++
+ }
+
+ return ops[:j]
+ }
+
+ txn.Success = f(txn.Success)
+ txn.Failure = f(txn.Failure)
+}
+
+// ---------------------------------------- OVER ------------------------------------------------------------
+
+func newHeader(s *EtcdServer) *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(s.Cluster().ID()),
+ MemberId: uint64(s.ID()),
+ Revision: s.KV().Rev(), // the KV revision at the time the txn is opened
+ RaftTerm: s.Term(),
+ }
+}
+
+func compareInt64(a, b int64) int {
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+}
+
+// pruneKVs filters the ranged key-value pairs in place, dropping entries matched by isPrunable.
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+ j := 0
+ for i := range rr.KVs {
+ rr.KVs[j] = rr.KVs[i]
+ if !isPrunable(&rr.KVs[i]) {
+ j++
+ }
+ }
+ rr.KVs = rr.KVs[:j]
+}
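
An illustrative sketch (not part of the original change) of pruneKVs: it filters in place and preserves order, which is how Range below applies the Min/MaxModRevision and Min/MaxCreateRevision filters. The values are made up; KeyValue.Key is assumed to be a string, as elsewhere in this fork.

package etcdserver

import (
	"github.com/ls-2018/etcd_cn/etcd/mvcc"
	"github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
)

// sketchPrune is an illustrative sketch, not part of the original change.
func sketchPrune() {
	rr := &mvcc.RangeResult{KVs: []mvccpb.KeyValue{
		{Key: "a", ModRevision: 3},
		{Key: "b", ModRevision: 7},
		{Key: "c", ModRevision: 5},
	}}
	// drop everything modified after revision 5, as Range does for MaxModRevision
	pruneKVs(rr, func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > 5 })
	// rr.KVs is now [{a 3} {c 5}], still in the original order
}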
+
+// Range executes a range read against the MVCC store.
+func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ trace := traceutil.Get(ctx)
+ resp := &pb.RangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+
+ if txn == nil {
+ txn = a.s.kv.Read(mvcc.ConcurrentReadTxMode, trace) // open a concurrent read transaction
+ defer txn.End()
+ }
+
+ limit := r.Limit
+ // sorting or revision filters requested
+ if r.SortOrder != pb.RangeRequest_NONE || r.MinModRevision != 0 || r.MaxModRevision != 0 || r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
+ // min/max create-revision or mod-revision filters are in play:
+ // fetch everything, then sort and truncate
+ limit = 0
+ }
+ if limit > 0 {
+ // fetch one extra result so the 'more' flag can be set
+ limit = limit + 1
+ }
+
+ ro := mvcc.RangeOptions{
+ Limit: limit, // 0
+ Rev: r.Revision, // 0
+ Count: r.CountOnly, // false
+ }
+ // the main range lookup
+ rr, err := txn.Range(ctx, []byte(r.Key), mkGteRange([]byte(r.RangeEnd)), ro)
+ if err != nil {
+ return nil, err
+ }
+ // prune the returned key-value pairs by the revision filters
+ if r.MaxModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MaxCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
+ pruneKVs(rr, f)
+ }
+
+ sortOrder := r.SortOrder // no sorting by default
+ // the default sort target is the requested key
+ if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
+ // mvcc.Range already returns results in ascending lexical key order, so sort ascending by default only when the target is not 'KEY'
+ sortOrder = pb.RangeRequest_ASCEND
+ }
+ if sortOrder != pb.RangeRequest_NONE {
+ var sorter sort.Interface
+ switch {
+ case r.SortTarget == pb.RangeRequest_KEY:
+ sorter = &kvSortByKey{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VERSION:
+ sorter = &kvSortByVersion{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_CREATE:
+ sorter = &kvSortByCreate{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_MOD:
+ sorter = &kvSortByMod{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VALUE:
+ sorter = &kvSortByValue{&kvSort{rr.KVs}}
+ }
+ switch {
+ case sortOrder == pb.RangeRequest_ASCEND:
+ sort.Sort(sorter)
+ case sortOrder == pb.RangeRequest_DESCEND:
+ sort.Sort(sort.Reverse(sorter))
+ }
+ }
+
+ if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
+ rr.KVs = rr.KVs[:r.Limit]
+ resp.More = true
+ }
+ trace.Step("筛选键值对并对其排序")
+ resp.Header.Revision = rr.Rev
+ resp.Count = int64(rr.Count)
+ resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ if r.KeysOnly {
+ rr.KVs[i].Value = ""
+ }
+ resp.Kvs[i] = &rr.KVs[i]
+ }
+ trace.Step("组装响应")
+ return resp, nil
+}
+
+func mkGteRange(rangeEnd []byte) []byte {
+ if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+ return []byte{}
+ }
+ return rangeEnd
+}
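
An illustrative sketch (not part of the original change) of the three RangeEnd shapes that mkGteRange distinguishes, mirroring the usual etcd range conventions:

package etcdserver

// sketchRangeEnd is an illustrative sketch, not part of the original change.
func sketchRangeEnd() {
	_ = mkGteRange(nil)            // nil or empty: a single-key lookup of Key itself
	_ = mkGteRange([]byte{0})      // "\x00": becomes []byte{}, i.e. every key >= Key
	_ = mkGteRange([]byte("foo0")) // anything else: the half-open interval [Key, RangeEnd)
}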
+
+// kvSort provides the shared Len/Swap; each wrapper type below compares on a different field.
+type kvSort struct{ kvs []mvccpb.KeyValue }
+
+func (s *kvSort) Swap(i, j int) {
+ t := s.kvs[i]
+ s.kvs[i] = s.kvs[j]
+ s.kvs[j] = t
+}
+func (s *kvSort) Len() int { return len(s.kvs) }
+
+type kvSortByKey struct{ *kvSort }
+
+func (s *kvSortByKey) Less(i, j int) bool {
+ return bytes.Compare([]byte(s.kvs[i].Key), []byte(s.kvs[j].Key)) < 0
+}
+
+type kvSortByVersion struct{ *kvSort }
+
+func (s *kvSortByVersion) Less(i, j int) bool {
+ return (s.kvs[i].Version - s.kvs[j].Version) < 0
+}
+
+type kvSortByCreate struct{ *kvSort }
+
+func (s *kvSortByCreate) Less(i, j int) bool {
+ return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
+}
+
+type kvSortByMod struct{ *kvSort }
+
+func (s *kvSortByMod) Less(i, j int) bool {
+ return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
+}
+
+type kvSortByValue struct{ *kvSort }
+
+func (s *kvSortByValue) Less(i, j int) bool {
+ return bytes.Compare([]byte(s.kvs[i].Value), []byte(s.kvs[j].Value)) < 0
+}
+
+func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp := &pb.AlarmResponse{}
+ oldCount := len(a.s.alarmStore.Get(ar.Alarm)) // number of alarms of the requested type
+
+ lg := a.s.Logger()
+ switch ar.Action {
+ case pb.AlarmRequest_GET:
+ resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
+ case pb.AlarmRequest_ACTIVATE:
+ if ar.Alarm == pb.AlarmType_NONE {
+ break
+ }
+ m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) // record and persist the alarm
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
+ if !activated {
+ break
+ }
+ lg.Warn("发生警报", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
+ switch m.Alarm {
+ case pb.AlarmType_CORRUPT:
+ a.s.applyV3 = newApplierV3Corrupt(a)
+ case pb.AlarmType_NOSPACE:
+ a.s.applyV3 = newApplierV3Capped(a)
+ default:
+ lg.Panic("未实现的警报", zap.String("alarm", fmt.Sprintf("%+v", m)))
+ }
+ case pb.AlarmRequest_DEACTIVATE:
+ m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
+ if !deactivated {
+ break
+ }
+
+ switch m.Alarm {
+ case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
+ lg.Warn("警报解除", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
+ a.s.applyV3 = a.s.newApplierV3()
+ default:
+ lg.Warn("未实现的警报解除类型", zap.String("alarm", fmt.Sprintf("%+v", m)))
+ }
+ default:
+ return nil, nil
+ }
+ return resp, nil
+}
+
+// RoleList ok
+func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := a.s.AuthStore().RoleList(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+// RoleGet ok
+func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := a.s.AuthStore().RoleGet(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+// RoleDelete ok
+func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := a.s.AuthStore().RoleDelete(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+// RoleAdd ok
+func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := a.s.AuthStore().RoleAdd(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := a.s.AuthStore().RoleGrantPermission(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := a.s.AuthStore().RoleRevokePermission(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := a.s.AuthStore().UserAdd(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+// UserDelete ok
+func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := a.s.AuthStore().UserDelete(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := a.s.AuthStore().UserChangePassword(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := a.s.AuthStore().UserGrantRole(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := a.s.AuthStore().UserGet(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := a.s.AuthStore().UserRevokeRole(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := a.s.AuthStore().UserList(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+// AuthEnable ok
+func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
+ err := a.s.AuthStore().AuthEnable()
+ if err != nil {
+ return nil, err
+ }
+ return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
+}
+
+// AuthDisable ok
+func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
+ a.s.AuthStore().AuthDisable()
+ return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
+}
+
+// AuthStatus ok
+func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) {
+ enabled := a.s.AuthStore().IsAuthEnabled()
+ authRevision := a.s.AuthStore().Revision()
+ return &pb.AuthStatusResponse{Header: newHeader(a.s), Enabled: enabled, AuthRevision: authRevision}, nil
+}
+
+func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
+ resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+// LeaseGrant creates a lease.
+func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
+ resp := &pb.LeaseGrantResponse{}
+ if err == nil {
+ resp.ID = int64(l.ID)
+ resp.TTL = l.TTL()
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+// LeaseRevoke ok
+func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ fmt.Println("LeaseRevoke", lc)
+ err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
+ return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
+}
+
+// LeaseCheckpoint persists remaining TTLs so that a leader change does not reset leases to their full TTL.
+func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) {
+ fmt.Println("接收到checkpoint消息", lc.Checkpoints)
+ for _, c := range lc.Checkpoints {
+ err := a.s.lessor.Checkpoint(lease.LeaseID(c.ID), c.RemainingTtl)
+ if err != nil {
+ return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, err
+ }
+ }
+ return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, nil
+}
+
+// LeaseGrant checks the quota, then creates the lease.
+func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ ok := a.q.Available(lc)
+ resp, err := a.applierV3.LeaseGrant(lc)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, err
+}
+
+type quotaApplierV3 struct {
+ applierV3 // applierV3backend
+ q Quota
+}
+
+func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
+ return "aApplierV3{app, NewBackendQuota(s, "v3-applier")}
+}
+
+func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ ok := a.q.Available(p) // check whether the given request fits within the quota
+ resp, trace, err := a.applierV3.Put(ctx, txn, p)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, trace, err
+}
diff --git a/etcd/etcdserver/backend.go b/etcd/etcdserver/backend.go
new file mode 100644
index 00000000000..6d26404c004
--- /dev/null
+++ b/etcd/etcdserver/backend.go
@@ -0,0 +1,109 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/config"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+ "go.uber.org/zap"
+)
+
+func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
+ bcfg := backend.DefaultBackendConfig()
+ bcfg.Path = cfg.BackendPath()
+ bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync
+ if cfg.BackendBatchLimit != 0 {
+ bcfg.BatchLimit = cfg.BackendBatchLimit
+ if cfg.Logger != nil {
+ cfg.Logger.Info("设置后端batch限制", zap.Int("batch limit", cfg.BackendBatchLimit))
+ }
+ }
+ if cfg.BackendBatchInterval != 0 {
+ bcfg.BatchInterval = cfg.BackendBatchInterval
+ if cfg.Logger != nil {
+ cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval))
+ }
+ }
+ bcfg.BackendFreelistType = cfg.BackendFreelistType
+ bcfg.Logger = cfg.Logger
+ if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
+ // permit 10% excess over quota for disarm
+ bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
+ }
+ bcfg.Mlock = cfg.ExperimentalMemoryMlock
+ bcfg.Hooks = hooks
+ return backend.New(bcfg)
+}
+
+// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
+func openSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks backend.Hooks) (backend.Backend, error) {
+ snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find database snapshot file (%v)", err)
+ }
+ if err := os.Rename(snapPath, cfg.BackendPath()); err != nil {
+ return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err)
+ }
+ return openBackend(cfg, hooks), nil
+}
+
+// openBackend returns a backend that uses the current etcd db.
+func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
+ fn := cfg.BackendPath() // default.etcd/member/snap/db
+
+ now, beOpened := time.Now(), make(chan backend.Backend)
+ go func() {
+ beOpened <- newBackend(cfg, hooks)
+ }()
+
+ select {
+ case be := <-beOpened:
+ cfg.Logger.Info("打开后台数据库", zap.String("path", fn), zap.Duration("took", time.Since(now)))
+ return be
+
+ case <-time.After(10 * time.Second):
+ cfg.Logger.Info(
+ "db文件被另一个进程占用,或占用时间过长",
+ zap.String("path", fn),
+ zap.Duration("took", time.Since(now)),
+ )
+ }
+
+ return <-beOpened
+}
+
+// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes
+// before updating the backend db after persisting raft snapshot to disk,
+// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
+// case, replace the db with the snapshot db sent by the leader.
+func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) {
+ consistentIndex := uint64(0)
+ if beExist {
+ consistentIndex, _ = cindex.ReadConsistentIndex(oldbe.BatchTx())
+ }
+ if snapshot.Metadata.Index <= consistentIndex {
+ return oldbe, nil
+ }
+ oldbe.Close()
+ return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks)
+}
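+
+// Illustrative example (annotation, not part of the upstream file): if the raft snapshot
+// was persisted at index 100 but the existing db only reflects applies up to consistent
+// index 80, the db is behind the snapshot and is replaced by the snapshot db sent by the
+// leader; if the db's consistent index were already >= 100, the existing backend would be
+// kept as-is.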
diff --git a/etcd/etcdserver/cindex/cindex.go b/etcd/etcdserver/cindex/cindex.go
new file mode 100644
index 00000000000..6486359aab0
--- /dev/null
+++ b/etcd/etcdserver/cindex/cindex.go
@@ -0,0 +1,185 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cindex
+
+import (
+ "encoding/binary"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+)
+
+type Backend interface {
+ BatchTx() backend.BatchTx
+}
+
+// ConsistentIndexer handles idempotency between boltdb and the raft log.
+type ConsistentIndexer interface {
+ ConsistentIndex() uint64 // returns the consistent index of the currently executing entry
+ SetConsistentIndex(v uint64, term uint64) // sets the consistent index of the currently executing entry
+ UnsafeSave(tx backend.BatchTx) // must be called while holding the lock on tx; it saves the consistent index into the underlying stable storage
+ SetBackend(be Backend) // sets an available backend.BatchTx for the ConsistentIndexer
+}
+
+// When boltdb is used as the state machine, the WAL and boltdb are two separate entities
+// and can easily become inconsistent. etcd therefore stores a consistent-index record in
+// boltdb that represents the log index already successfully applied to boltdb, so that when
+// boltdb is restored from the WAL it can tell whether a log index has already been applied.
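+//
+// A minimal sketch of that recovery decision (illustrative only; "walEntries" and "apply"
+// are assumed names, not code from this file):
+//
+//  ci, _ := ReadConsistentIndex(be.BatchTx())
+//  for _, entry := range walEntries {
+//      if entry.Index <= ci {
+//          continue // already applied to boltdb before the restart
+//      }
+//      apply(entry) // replay only entries newer than the consistent index
+//  }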
+
+// consistentIndex implements the ConsistentIndexer interface.
+type consistentIndex struct {
+ // consistentIndex represents the offset of an entry in a consistent replica log.
+ // It caches the "consistent_index" key's value.
+ // Accessed through atomics, so it must be 64-bit aligned.
+ consistentIndex uint64
+ // term represents the RAFT term of committed entry in a consistent replica log.
+ // Accessed through atomics, so it must be 64-bit aligned.
+ // The value is being persisted in the backend since v3.5.
+ term uint64
+
+ // be is used for initial read consistentIndex
+ be Backend
+ // mutex is protecting be.
+ mutex sync.Mutex
+}
+
+// NewConsistentIndex returns a ConsistentIndexer.
+// If be is nil, SetBackend must be called before the first call to ConsistentIndex.
+func NewConsistentIndex(be Backend) ConsistentIndexer {
+ return &consistentIndex{be: be}
+}
+
+func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) {
+ index := atomic.LoadUint64(&ci.consistentIndex)
+ term := atomic.LoadUint64(&ci.term)
+ UnsafeUpdateConsistentIndex(tx, index, term, true)
+}
+
+func (ci *consistentIndex) SetBackend(be Backend) {
+ ci.mutex.Lock()
+ defer ci.mutex.Unlock()
+ ci.be = be
+ // After the backend is changed, the first access should re-read it.
+ ci.SetConsistentIndex(0, 0)
+}
+
+func NewFakeConsistentIndex(index uint64) ConsistentIndexer {
+ return &fakeConsistentIndex{index: index}
+}
+
+type fakeConsistentIndex struct {
+ index uint64
+ term uint64
+}
+
+func (f *fakeConsistentIndex) ConsistentIndex() uint64 { return f.index }
+
+func (f *fakeConsistentIndex) SetConsistentIndex(index uint64, term uint64) {
+ atomic.StoreUint64(&f.index, index)
+ atomic.StoreUint64(&f.term, term)
+}
+
+func (f *fakeConsistentIndex) UnsafeSave(_ backend.BatchTx) {}
+func (f *fakeConsistentIndex) SetBackend(_ Backend) {}
+
+func UnsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) {
+ if index == 0 {
+ // Never save 0, as it means we haven't loaded the real index yet.
+ return
+ }
+
+ if onlyGrow {
+ oldi, oldTerm := unsafeReadConsistentIndex(tx)
+ if term < oldTerm {
+ return
+ }
+ if term == oldTerm && index <= oldi {
+ return
+ }
+ }
+
+ bs1 := make([]byte, 8)
+ binary.BigEndian.PutUint64(bs1, index)
+ // put the index into the underlying backend
+ // tx has been locked in TxnBegin, so there is no need to lock it again
+ tx.UnsafePut(buckets.Meta, buckets.MetaConsistentIndexKeyName, bs1)
+ if term > 0 {
+ bs2 := make([]byte, 8)
+ binary.BigEndian.PutUint64(bs2, term)
+ tx.UnsafePut(buckets.Meta, buckets.MetaTermKeyName, bs2)
+ }
+}
+
+// ----------------------------------------- OVER -----------------------------------------------
+
+func (ci *consistentIndex) ConsistentIndex() uint64 {
+ if index := atomic.LoadUint64(&ci.consistentIndex); index > 0 {
+ return index
+ }
+ ci.mutex.Lock()
+ defer ci.mutex.Unlock()
+
+ v, term := ReadConsistentIndex(ci.be.BatchTx())
+ ci.SetConsistentIndex(v, term)
+ return v
+}
+
+func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) {
+ atomic.StoreUint64(&ci.consistentIndex, v)
+ atomic.StoreUint64(&ci.term, term)
+}
+
+func UnsafeCreateMetaBucket(tx backend.BatchTx) {
+ tx.UnsafeCreateBucket(buckets.Meta)
+}
+
+// CreateMetaBucket creates the meta bucket if it does not exist.
+func CreateMetaBucket(tx backend.BatchTx) {
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(buckets.Meta)
+}
+
+// unsafeReadConsistentIndex loads the consistent index and term from bolt.db.
+func unsafeReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
+ // consistent_index
+ _, vs := tx.UnsafeRange(buckets.Meta, buckets.MetaConsistentIndexKeyName, nil, 0)
+ if len(vs) == 0 {
+ return 0, 0
+ }
+ v := binary.BigEndian.Uint64(vs[0])
+ // term
+ _, ts := tx.UnsafeRange(buckets.Meta, buckets.MetaTermKeyName, nil, 0)
+ if len(ts) == 0 {
+ return v, 0
+ }
+ t := binary.BigEndian.Uint64(ts[0])
+ return v, t
+}
+
+// ReadConsistentIndex loads the consistent index and term from the given tx. It returns 0 if no data is found.
+func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
+ tx.Lock()
+ defer tx.Unlock()
+ return unsafeReadConsistentIndex(tx)
+}
+
+// UpdateConsistentIndex writes the consistent index to the meta bucket in bolt.db.
+func UpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) {
+ tx.Lock()
+ defer tx.Unlock()
+ UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow)
+}
diff --git a/etcd/etcdserver/cindex/doc.go b/etcd/etcdserver/cindex/doc.go
new file mode 100644
index 00000000000..1631be23fde
--- /dev/null
+++ b/etcd/etcdserver/cindex/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cindex provides an interface and implementation for getting and saving the consistent index.
+package cindex
diff --git a/etcd/etcdserver/cluster_util.go b/etcd/etcdserver/cluster_util.go
new file mode 100644
index 00000000000..2bb7e52e25c
--- /dev/null
+++ b/etcd/etcdserver/cluster_util.go
@@ -0,0 +1,478 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+// isMemberBootstrapped tries to check whether the given member has already been bootstrapped in the given cluster.
+func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
+ // fetch the peer URLs of all members other than the local one
+ rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt) // cluster membership obtained from remote peers
+ if err != nil {
+ // During initialization an error is expected here: member is the node name, while cl.member stores the hashed IDs.
+ return false
+ }
+ id := cl.MemberByName(member).ID
+ m := rcl.Member(id) // the member as seen by the remote cluster
+ if m == nil {
+ return false
+ }
+ if len(m.ClientURLs) > 0 {
+ return true
+ }
+ return false
+}
+
+// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
+// attempts to construct a Cluster by accessing the members endpoint on one of
+// these URLs. The first URL to provide a response is used. If no URLs provide
+// a response, or a Cluster cannot be successfully created from a received
+// response, an error is returned.
+// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
+// 10 seconds is enough for building the connection and finishing the request.
+func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
+ return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt)
+}
+
+// getClusterFromRemotePeers returns the cluster membership obtained from remote peers.
+func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ cc := &http.Client{
+ Transport: rt,
+ Timeout: timeout,
+ }
+ for _, u := range urls {
+ addr := u + "/members"
+ resp, err := cc.Get(addr)
+ if err != nil {
+ if logerr {
+ lg.Warn("获取集群响应失败", zap.String("address", addr), zap.Error(err))
+ }
+ continue
+ }
+ b, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ if logerr {
+ lg.Warn("读取集群响应失败", zap.String("address", addr), zap.Error(err))
+ }
+ continue
+ }
+ var membs []*membership.Member
+ if err = json.Unmarshal(b, &membs); err != nil {
+ if logerr {
+ lg.Warn("反序列化集群响应失败", zap.String("address", addr), zap.Error(err))
+ }
+ continue
+ }
+ id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
+ if err != nil {
+ if logerr {
+ lg.Warn(
+ "无法解析集群ID",
+ zap.String("address", addr),
+ zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")),
+ zap.Error(err),
+ )
+ }
+ continue
+ }
+
+ if len(membs) > 0 {
+ return membership.NewClusterFromMembers(lg, id, membs), nil // Construct struct
+ }
+ return nil, fmt.Errorf("无法获取raft集群节点信息从远端节点")
+ }
+ return nil, fmt.Errorf("无法从给定的URL中检索到集群信息")
+}
+
+// getRemotePeerURLs returns the peer URLs of all members except the local one.
+func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
+ us := make([]string, 0)
+ for _, m := range cl.Members() {
+ if m.Name == local {
+ continue
+ }
+ us = append(us, m.PeerURLs...)
+ }
+ sort.Strings(us)
+ return us
+}
+
+// getVersions returns the versions of the members in the given cluster.
+// The key of the returned map is the member's ID. The value of the returned map
+// is the semver version string, including the etcd server and cluster versions.
+// If it fails to get the version of a member, the value will be nil.
+func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
+ members := cl.Members()
+ vers := make(map[string]*version.Versions)
+ for _, m := range members {
+ if m.ID == local {
+ cv := "not_decided"
+ if cl.Version() != nil {
+ cv = cl.Version().String()
+ }
+ vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
+ continue
+ }
+ ver, err := getVersion(lg, m, rt)
+ if err != nil {
+ lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
+ vers[m.ID.String()] = nil
+ } else {
+ vers[m.ID.String()] = ver
+ }
+ }
+ return vers
+}
+
+// decideClusterVersion decides the cluster version based on the versions map.
+// The returned version is the min etcd version in the map, or nil if the min
+// version is unknown.
+func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version {
+ var cv *semver.Version
+ lv := semver.Must(semver.NewVersion(version.Version))
+
+ for mid, ver := range vers {
+ if ver == nil {
+ return nil
+ }
+ v, err := semver.NewVersion(ver.Server)
+ if err != nil {
+ lg.Warn(
+ "failed to parse etcd version of remote member",
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ zap.Error(err),
+ )
+ return nil
+ }
+ if lv.LessThan(*v) {
+ lg.Warn(
+ "leader found higher-versioned member",
+ zap.String("local-member-version", lv.String()),
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ )
+ }
+ if cv == nil {
+ cv = v
+ } else if v.LessThan(*cv) {
+ cv = v
+ }
+ }
+ return cv
+}
+
+// allowedVersionRange decides the available version range of the cluster that local etcd can join in;
+// if the downgrade enabled status is true, the version window is [oneMinorHigher, oneMinorHigher]
+// if the downgrade is not enabled, the version window is [MinClusterVersion, localVersion]
+func allowedVersionRange(downgradeEnabled bool) (minV *semver.Version, maxV *semver.Version) {
+ minV = semver.Must(semver.NewVersion(version.MinClusterVersion))
+ maxV = semver.Must(semver.NewVersion(version.Version))
+ maxV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor}
+
+ if downgradeEnabled {
+ // Todo: handle the case that downgrading from higher major version(e.g. downgrade from v4.0 to v3.x)
+ maxV.Minor = maxV.Minor + 1
+ minV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor}
+ }
+ return minV, maxV
+}
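+
+// Illustrative example (annotation, not part of the upstream file), assuming the local
+// binary is v3.5.x: with downgrade disabled the allowed cluster-version window is
+// [MinClusterVersion, 3.5]; with downgrade enabled it narrows to [3.6, 3.6], i.e. the
+// local member may only join a cluster that is exactly one minor version higher.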
+
+// isCompatibleWithCluster return true if the local member has a compatible version with
+// the current running cluster.
+// The version is considered as compatible when at least one of the other members in the cluster has a
+// cluster version in the range of [MinV, MaxV] and no known members has a cluster version
+// out of the range.
+// We set this rule since when the local member joins, another member might be offline.
+func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
+ vers := getVersions(lg, cl, local, rt)
+ minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt))
+ return isCompatibleWithVers(lg, vers, local, minV, maxV)
+}
+
+func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
+ var ok bool
+ for id, v := range vers {
+ // ignore comparison with local version
+ if id == local.String() {
+ continue
+ }
+ if v == nil {
+ continue
+ }
+ clusterv, err := semver.NewVersion(v.Cluster)
+ if err != nil {
+ lg.Warn(
+ "failed to parse cluster version of remote member",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", v.Cluster),
+ zap.Error(err),
+ )
+ continue
+ }
+ if clusterv.LessThan(*minV) {
+ lg.Warn(
+ "cluster version of remote member is not compatible; too low",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", clusterv.String()),
+ zap.String("minimum-cluster-version-supported", minV.String()),
+ )
+ return false
+ }
+ if maxV.LessThan(*clusterv) {
+ lg.Warn(
+ "cluster version of remote member is not compatible; too high",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", clusterv.String()),
+ zap.String("minimum-cluster-version-supported", minV.String()),
+ )
+ return false
+ }
+ ok = true
+ }
+ return ok
+}
+
+// getVersion returns the Versions of the given member via its
+// peerURLs. Returns the last error if it fails to get the version.
+func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
+ cc := &http.Client{
+ Transport: rt,
+ }
+ var (
+ err error
+ resp *http.Response
+ )
+
+ for _, u := range m.PeerURLs {
+ addr := u + "/version"
+ resp, err = cc.Get(addr)
+ if err != nil {
+ lg.Warn(
+ "failed to reach the peer URL",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var b []byte
+ b, err = ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ lg.Warn(
+ "failed to read body of response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var vers version.Versions
+ if err = json.Unmarshal(b, &vers); err != nil {
+ lg.Warn(
+ "failed to unmarshal response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ return &vers, nil
+ }
+ return nil, err
+}
+
+func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.RoundTripper) ([]*membership.Member, error) {
+ cc := &http.Client{Transport: peerRt}
+ // TODO: refactor member http handler code
+ // cannot import etcdhttp, so manually construct url
+ requestUrl := url + "/members/promote/" + fmt.Sprintf("%d", id)
+ req, err := http.NewRequest("POST", requestUrl, nil)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return nil, ErrTimeout
+ }
+ if resp.StatusCode == http.StatusPreconditionFailed {
+ // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code
+ if strings.Contains(string(b), ErrLearnerNotReady.Error()) {
+ return nil, ErrLearnerNotReady
+ }
+ if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) {
+ return nil, membership.ErrMemberNotLearner
+ }
+ return nil, fmt.Errorf("member promote: unknown error(%s)", string(b))
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, membership.ErrIDNotFound
+ }
+
+ if resp.StatusCode != http.StatusOK { // all other types of errors
+ return nil, fmt.Errorf("member promote: unknown error(%s)", string(b))
+ }
+
+ var membs []*membership.Member
+ if err := json.Unmarshal(b, &membs); err != nil {
+ return nil, err
+ }
+ return membs, nil
+}
+
+// getDowngradeEnabledFromRemotePeers will get the downgrade enabled status of the cluster.
+func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
+ members := cl.Members()
+
+ for _, m := range members {
+ if m.ID == local {
+ continue
+ }
+ enable, err := getDowngradeEnabled(lg, m, rt)
+ if err != nil {
+ lg.Warn("failed to get downgrade enabled status", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
+ } else {
+ // Since the "/downgrade/enabled" serves linearized data,
+ // this function can return once it gets a non-error response from the endpoint.
+ return enable
+ }
+ }
+ return false
+}
+
+// getDowngradeEnabled returns the downgrade enabled status of the given member
+// via its peerURLs. Returns the last error if it fails to get it.
+func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (bool, error) {
+ cc := &http.Client{
+ Transport: rt,
+ }
+ var (
+ err error
+ resp *http.Response
+ )
+
+ for _, u := range m.PeerURLs {
+ addr := u + DowngradeEnabledPath
+ resp, err = cc.Get(addr)
+ if err != nil {
+ lg.Warn(
+ "failed to reach the peer URL",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var b []byte
+ b, err = ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ lg.Warn(
+ "failed to read body of response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var enable bool
+ if enable, err = strconv.ParseBool(string(b)); err != nil {
+ lg.Warn(
+ "failed to convert response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ return enable, nil
+ }
+ return false, err
+}
+
+// isMatchedVersions returns true if all etcd versions are equal to the target version; otherwise it returns false.
+// It can be used to decide whether the cluster has finished downgrading to the target version.
+func isMatchedVersions(lg *zap.Logger, targetVersion *semver.Version, vers map[string]*version.Versions) bool {
+ for mid, ver := range vers {
+ if ver == nil {
+ return false
+ }
+ v, err := semver.NewVersion(ver.Cluster)
+ if err != nil {
+ lg.Warn(
+ "failed to parse etcd version of remote member",
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ zap.Error(err),
+ )
+ return false
+ }
+ if !targetVersion.Equal(*v) {
+ lg.Warn("remotes etcd has mismatching etcd version",
+ zap.String("remote-member-id", mid),
+ zap.String("current-etcd-version", v.String()),
+ zap.String("target-version", targetVersion.String()),
+ )
+ return false
+ }
+ }
+ return true
+}
+
+func convertToClusterVersion(v string) (*semver.Version, error) {
+ ver, err := semver.NewVersion(v)
+ if err != nil {
+ // allow input version format Major.Minor
+ ver, err = semver.NewVersion(v + ".0")
+ if err != nil {
+ return nil, ErrWrongDowngradeVersionFormat
+ }
+ }
+ // cluster version only keeps major.minor, remove patch version
+ ver = &semver.Version{Major: ver.Major, Minor: ver.Minor}
+ return ver, nil
+}
diff --git a/etcd/etcdserver/errors.go b/etcd/etcdserver/errors.go
new file mode 100644
index 00000000000..e82e539caa9
--- /dev/null
+++ b/etcd/etcdserver/errors.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ ErrUnknownMethod = errors.New("etcdserver: 未知的请求方法")
+ ErrStopped = errors.New("etcdserver: etcd停止")
+ ErrCanceled = errors.New("etcdserver: 请求取消")
+ ErrTimeout = errors.New("etcdserver: 请求超时")
+ ErrTimeoutDueToLeaderFail = errors.New("etcdserver: 请求超时,可能是由于之前的领导者失败了")
+ ErrTimeoutDueToConnectionLost = errors.New("etcdserver: 请求超时,可能是由于连接丢失")
+ ErrTimeoutLeaderTransfer = errors.New("etcdserver: 请求超时,领导者转移时间过长")
+ ErrLeaderChanged = errors.New("etcdserver: 领导者转移了")
+ ErrNotEnoughStartedMembers = errors.New("etcdserver: 由于启动的成员不足,重新配置失败")
+ ErrLearnerNotReady = errors.New("etcdserver: 只能提拔与leader同步的learner成员")
+ ErrNoLeader = errors.New("etcdserver: 没有leader")
+ ErrNotLeader = errors.New("etcdserver: 不是leader")
+ ErrRequestTooLarge = errors.New("etcdserver: 请求太多")
+ ErrNoSpace = errors.New("etcdserver: 没有空间")
+ ErrTooManyRequests = errors.New("etcdserver: 太多的请求")
+ ErrUnhealthy = errors.New("etcdserver: 集群不健康")
+ ErrKeyNotFound = errors.New("etcdserver: key没找到")
+ ErrCorrupt = errors.New("etcdserver: 损坏的集群")
+ ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee")
+ ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade")
+ ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format")
+ ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version")
+ ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress")
+ ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job")
+)
+
+type DiscoveryError struct {
+ Op string
+ Err error
+}
+
+func (e DiscoveryError) Error() string {
+ return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
+}
diff --git a/etcd/etcdserver/kv.go b/etcd/etcdserver/kv.go
new file mode 100644
index 00000000000..d5bbc5931c1
--- /dev/null
+++ b/etcd/etcdserver/kv.go
@@ -0,0 +1,276 @@
+package etcdserver
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "go.uber.org/zap"
+)
+
+// CheckInitialHashKV compares the initial hash value with its peers before serving any peer/client traffic. A mismatch can only occur when the hashes at the requested revision differ while the compact revisions are the same.
+func (s *EtcdServer) CheckInitialHashKV() error {
+ if !s.Cfg.InitialCorruptCheck { // the data corruption check is not enabled
+ return nil
+ }
+
+ lg := s.Logger()
+
+ lg.Info(
+ "starting initial corruption check",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Duration("timeout", s.Cfg.ReqTimeout()),
+ )
+
+ h, rev, crev, err := s.kv.HashByRev(0)
+ if err != nil {
+ return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
+ }
+ peers := s.getPeerHashKVs(rev)
+ mismatch := 0
+ for _, p := range peers {
+ if p.resp != nil {
+ peerID := types.ID(p.resp.Header.MemberId)
+ fields := []zap.Field{
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("local-member-revision", rev),
+ zap.Int64("local-member-compact-revision", crev),
+ zap.Uint32("local-member-hash", h),
+ zap.String("remote-peer-id", peerID.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Int64("remote-peer-revision", p.resp.Header.Revision),
+ zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision),
+ zap.Uint32("remote-peer-hash", p.resp.Hash),
+ }
+
+ if h != p.resp.Hash {
+ if crev == p.resp.CompactRevision {
+ lg.Warn("found different hash values from remote peer", fields...)
+ mismatch++
+ } else {
+ lg.Warn("found different compact revision values from remote peer", fields...)
+ }
+ }
+
+ continue
+ }
+
+ if p.err != nil {
+ switch p.err {
+ case rpctypes.ErrFutureRev:
+ lg.Warn(
+ "cannot fetch hash from slow remote peer",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("local-member-revision", rev),
+ zap.Int64("local-member-compact-revision", crev),
+ zap.Uint32("local-member-hash", h),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Error(err),
+ )
+ case rpctypes.ErrCompacted:
+ lg.Warn(
+ "cannot fetch hash from remote peer; local member is behind",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("local-member-revision", rev),
+ zap.Int64("local-member-compact-revision", crev),
+ zap.Uint32("local-member-hash", h),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Error(err),
+ )
+ }
+ }
+ }
+ if mismatch > 0 {
+ return fmt.Errorf("%s found data inconsistency with peers", s.ID())
+ }
+
+ lg.Info(
+ "initial corruption checking passed; no corruption",
+ zap.String("local-member-id", s.ID().String()),
+ )
+ return nil
+}
+
+func (s *EtcdServer) monitorKVHash() {
+ t := s.Cfg.CorruptCheckTime
+ if t == 0 {
+ return
+ }
+
+ lg := s.Logger()
+ lg.Info("启用损坏检查", zap.String("local-member-id", s.ID().String()), zap.Duration("interval", t))
+
+ for {
+ select {
+ case <-s.stopping:
+ return
+ case <-time.After(t):
+ }
+ if !s.isLeader() {
+ continue
+ }
+ if err := s.checkHashKV(); err != nil {
+ lg.Warn("failed to check hash KV", zap.Error(err))
+ }
+ }
+}
+
+func (s *EtcdServer) checkHashKV() error {
+ lg := s.Logger()
+
+ h, rev, crev, err := s.kv.HashByRev(0)
+ if err != nil {
+ return err
+ }
+ peers := s.getPeerHashKVs(rev)
+
+ ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ err = s.linearizeReadNotify(ctx)
+ cancel()
+ if err != nil {
+ return err
+ }
+
+ h2, rev2, crev2, err := s.kv.HashByRev(0)
+ if err != nil {
+ return err
+ }
+
+ alarmed := false
+ mismatch := func(id uint64) {
+ if alarmed {
+ return
+ }
+ alarmed = true
+ a := &pb.AlarmRequest{
+ MemberID: id,
+ Action: pb.AlarmRequest_ACTIVATE, // checkHashKV
+ Alarm: pb.AlarmType_CORRUPT,
+ }
+ s.GoAttach(func() {
+ s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+ })
+ }
+
+ if h2 != h && rev2 == rev && crev == crev2 {
+ lg.Warn(
+ "found hash mismatch",
+ zap.Int64("revision-1", rev),
+ zap.Int64("compact-revision-1", crev),
+ zap.Uint32("hash-1", h),
+ zap.Int64("revision-2", rev2),
+ zap.Int64("compact-revision-2", crev2),
+ zap.Uint32("hash-2", h2),
+ )
+ mismatch(uint64(s.ID()))
+ }
+
+ checkedCount := 0
+ for _, p := range peers {
+ if p.resp == nil {
+ continue
+ }
+ checkedCount++
+ id := p.resp.Header.MemberId
+
+ // leader expects follower's latest revision less than or equal to leader's
+ if p.resp.Header.Revision > rev2 {
+ lg.Warn(
+ "revision from follower必须是less than or equal to leader's",
+ zap.Int64("leader-revision", rev2),
+ zap.Int64("follower-revision", p.resp.Header.Revision),
+ zap.String("follower-peer-id", types.ID(id).String()),
+ )
+ mismatch(id)
+ }
+
+ // leader expects follower's latest compact revision less than or equal to leader's
+ if p.resp.CompactRevision > crev2 {
+ lg.Warn(
+ "compact revision from follower必须是less than or equal to leader's",
+ zap.Int64("leader-compact-revision", crev2),
+ zap.Int64("follower-compact-revision", p.resp.CompactRevision),
+ zap.String("follower-peer-id", types.ID(id).String()),
+ )
+ mismatch(id)
+ }
+
+ // follower's compact revision is leader's old one, then hashes must match
+ if p.resp.CompactRevision == crev && p.resp.Hash != h {
+ lg.Warn(
+ "same compact revision then hashes must match",
+ zap.Int64("leader-compact-revision", crev2),
+ zap.Uint32("leader-hash", h),
+ zap.Int64("follower-compact-revision", p.resp.CompactRevision),
+ zap.Uint32("follower-hash", p.resp.Hash),
+ zap.String("follower-peer-id", types.ID(id).String()),
+ )
+ mismatch(id)
+ }
+ }
+ lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount))
+ return nil
+}
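+
+// Illustrative summary (annotation, not part of the upstream file): only hashes computed
+// at the same compact revision are comparable. For example, if the leader and a follower
+// both report compact revision 1000 but different hashes, that member is flagged via a
+// CORRUPT alarm; differing compact revisions alone only produce a warning in the initial
+// check above.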
+
+type peerInfo struct {
+ id types.ID
+ eps []string
+}
+
+type peerHashKVResp struct {
+ peerInfo
+ resp *pb.HashKVResponse
+ err error
+}
+
+func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
+ // TODO: handle the case when "s.cluster.Members" have not
+ // been populated (e.g. no snapshot to load from disk)
+ members := s.cluster.Members()
+ peers := make([]peerInfo, 0, len(members))
+ for _, m := range members {
+ if m.ID == s.ID() {
+ continue
+ }
+ peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs})
+ }
+
+ lg := s.Logger()
+
+ var resps []*peerHashKVResp
+ for _, p := range peers {
+ if len(p.eps) == 0 {
+ continue
+ }
+
+ respsLen := len(resps)
+ var lastErr error
+ for _, ep := range p.eps {
+ ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ var resp *pb.HashKVResponse
+ resp, lastErr = s.getPeerHashKVHTTP(ctx, ep, rev) // assign the outer lastErr so a total failure is reported below
+ cancel()
+ if lastErr == nil {
+ resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
+ break
+ }
+ lg.Warn(
+ "failed hash kv request",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("requested-revision", rev),
+ zap.String("remote-peer-endpoint", ep),
+ zap.Error(lastErr),
+ )
+ }
+
+ // failed to get hashKV from all endpoints of this peer
+ if respsLen == len(resps) {
+ resps = append(resps, &peerHashKVResp{peerInfo: p, resp: nil, err: lastErr})
+ }
+ }
+ return resps
+}
diff --git a/etcd/etcdserver/over_alarms_method_overwirter.go b/etcd/etcdserver/over_alarms_method_overwirter.go
new file mode 100644
index 00000000000..0137972c864
--- /dev/null
+++ b/etcd/etcdserver/over_alarms_method_overwirter.go
@@ -0,0 +1,79 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+)
+
+type applierV3Corrupt struct {
+ applierV3
+}
+
+func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
+
+func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
+ return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ return nil, nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ return nil, nil, nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ return nil, ErrCorrupt
+}
+
+type applierV3Capped struct {
+ applierV3
+ q backendQuota
+}
+
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
+
+func (a *applierV3Capped) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, ErrNoSpace
+}
+
+func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ if a.q.Cost(r) > 0 {
+ return nil, nil, ErrNoSpace
+ }
+ return a.applierV3.Txn(ctx, r)
+}
+
+func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, ErrNoSpace
+}
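+
+// Note (annotation added for illustration, not part of the upstream file): applierV3Capped
+// rejects any mutation that would consume quota (puts, lease grants, and txns whose cost is
+// non-zero) while still delegating reads and zero-cost txns; applierV3Corrupt rejects
+// essentially every operation once a corruption alarm is active. Which wrapper is installed
+// is assumed to be driven by the alarm handling elsewhere in the server.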
diff --git a/etcd/etcdserver/over_httpserver_access_control.go b/etcd/etcdserver/over_httpserver_access_control.go
new file mode 100644
index 00000000000..e5a5d64ba44
--- /dev/null
+++ b/etcd/etcdserver/over_httpserver_access_control.go
@@ -0,0 +1,55 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import "sync"
+
+// AccessController controls access for etcd HTTP requests.
+type AccessController struct {
+ corsMu sync.RWMutex
+ CORS map[string]struct{}
+ hostWhitelistMu sync.RWMutex
+ HostWhitelist map[string]struct{}
+}
+
+// OriginAllowed reports whether a cross-origin request from the given origin is allowed.
+func (ac *AccessController) OriginAllowed(origin string) bool {
+ ac.corsMu.RLock()
+ defer ac.corsMu.RUnlock()
+ if len(ac.CORS) == 0 { // allow all
+ return true
+ }
+ _, ok := ac.CORS["*"]
+ if ok {
+ return true
+ }
+ _, ok = ac.CORS[origin]
+ return ok
+}
+
+// IsHostWhitelisted reports whether the host is in the whitelist.
+func (ac *AccessController) IsHostWhitelisted(host string) bool {
+ ac.hostWhitelistMu.RLock()
+ defer ac.hostWhitelistMu.RUnlock()
+ if len(ac.HostWhitelist) == 0 { // allow all
+ return true
+ }
+ _, ok := ac.HostWhitelist["*"]
+ if ok {
+ return true
+ }
+ _, ok = ac.HostWhitelist[host]
+ return ok
+}
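+
+// Illustrative usage (annotation, not part of the upstream file): with an empty CORS map
+// every origin is allowed; once populated, only listed origins (or "*") pass the check.
+//
+//  ac := &AccessController{CORS: map[string]struct{}{"https://example.com": {}}}
+//  ac.OriginAllowed("https://example.com") // true
+//  ac.OriginAllowed("https://other.com")   // false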
diff --git a/etcd/etcdserver/over_linearize_read.go b/etcd/etcdserver/over_linearize_read.go
new file mode 100644
index 00000000000..4c3ca48bf6b
--- /dev/null
+++ b/etcd/etcdserver/over_linearize_read.go
@@ -0,0 +1,189 @@
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+ "github.com/ls-2018/etcd_cn/raft"
+ "go.uber.org/zap"
+)
+
+type notifier struct {
+ c chan struct{}
+ err error
+}
+
+// newNotifier creates a notifier.
+func newNotifier() *notifier {
+ return &notifier{
+ c: make(chan struct{}),
+ }
+}
+
+func (nc *notifier) notify(err error) {
+ nc.err = err
+ close(nc.c)
+}
+
+// linearizableReadLoop implements linearizable reads for strong consistency: it blocks until appliedIndex >= the read index generated for the request.
+func (s *EtcdServer) linearizableReadLoop() {
+ for {
+ requestId := s.reqIDGen.Next()
+ leaderChangedNotifier := s.LeaderChangedNotify()
+ select {
+ case <-leaderChangedNotifier:
+ continue
+ case <-s.readwaitc:
+ // when a client issues a linearizable read, an empty struct is written to readwaitc as a signal
+ fmt.Println("开始一次linearizableRead")
+ case <-s.stopping:
+ return
+ }
+
+ // Because one loop can unblock multiple reads, propagating a trace from Txn or Range is not very useful.
+ trace := traceutil.New("linearizableReadLoop", s.Logger())
+
+ s.readMu.Lock()
+ nr := s.readNotifier
+ s.readNotifier = newNotifier()
+ s.readMu.Unlock()
+ // Handle the read-index request:
+ // this listens on readwaitc, sends MsgReadIndex, and waits for the MsgReadIndex response,
+ // obtaining the currently committed log index at the same time.
+ // Executed serially.
+ confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestId) // MsgReadIndex carries requestId through a full round trip of raft
+ if isStopped(err) {
+ return
+ }
+ if err != nil {
+ nr.notify(err)
+ continue
+ }
+
+ trace.Step("收到要读的索引")
+ trace.AddField(traceutil.Field{Key: "readStateIndex", Value: confirmedIndex})
+ appliedIndex := s.getAppliedIndex()
+ trace.AddField(traceutil.Field{Key: "appliedIndex", Value: strconv.FormatUint(appliedIndex, 10)})
+ // key point: wait here until apply index >= read index
+ if appliedIndex < confirmedIndex {
+ select {
+ case <-s.applyWait.Wait(confirmedIndex):
+ case <-s.stopping:
+ return
+ }
+ }
+ // signal that the state machine can now be read
+ nr.notify(nil)
+ trace.Step("applied 索引现在低于 readState.Index")
+ }
+}
+
+// requestCurrentIndex requests the current read index from the raft leader.
+func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) {
+ err := s.sendReadIndex(requestId) // the id generated for this linearizable read; this just sends a raft message asynchronously
+ if err != nil {
+ return 0, err
+ }
+
+ lg := s.Logger()
+ errorTimer := time.NewTimer(s.Cfg.ReqTimeout())
+ defer errorTimer.Stop()
+ retryTimer := time.NewTimer(readIndexRetryTime) // 500ms
+ defer retryTimer.Stop()
+
+ firstCommitInTermNotifier := s.FirstCommitInTermNotify()
+
+ for {
+ select {
+ case rs := <-s.r.readStateC: // s.sendReadIndex(requestId) eventually causes raft to deliver a signal here
+ requestIdBytes := uint64ToBigEndianBytes(requestId)
+ gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIdBytes)
+ // rs.RequestCtx != requestIdBytes can happen under high concurrency, caused by a subsequent get request
+ if !gotOwnResponse {
+ // The previous request may have timed out. Ignore its response and keep waiting for the response to the current request.
+ responseId := uint64(0)
+ if len(rs.RequestCtx) == 8 {
+ responseId = binary.BigEndian.Uint64(rs.RequestCtx)
+ }
+ lg.Warn(
+ "忽略过期的读索引响应;本地节点读取索引排队等待后端与leader同步",
+ zap.Uint64("sent-request-id", requestId),
+ zap.Uint64("received-request-id", responseId),
+ )
+ continue
+ }
+ return rs.Index, nil // this is the index the leader has already committed
+ case <-leaderChangedNotifier:
+ return 0, ErrLeaderChanged
+ case <-firstCommitInTermNotifier:
+ firstCommitInTermNotifier = s.FirstCommitInTermNotify()
+ lg.Info("第一次提交:重发ReadIndex请求")
+ err := s.sendReadIndex(requestId)
+ if err != nil {
+ return 0, err
+ }
+ retryTimer.Reset(readIndexRetryTime)
+ continue
+ case <-retryTimer.C:
+ lg.Warn("等待ReadIndex响应时间过长,需要重新尝试", zap.Uint64("sent-request-id", requestId), zap.Duration("retry-timeout", readIndexRetryTime))
+ err := s.sendReadIndex(requestId)
+ if err != nil {
+ return 0, err
+ }
+ retryTimer.Reset(readIndexRetryTime)
+ continue
+ case <-errorTimer.C:
+ lg.Warn("等待读索引响应时超时(本地节点可能有较慢的网络)", zap.Duration("timeout", s.Cfg.ReqTimeout()))
+ return 0, ErrTimeout
+ case <-s.stopping:
+ return 0, ErrStopped
+ }
+ }
+}
+
+// sendReadIndex asynchronously sends a raft message; this is what an etcdctl get triggers.
+func (s *EtcdServer) sendReadIndex(requestIndex uint64) error {
+ ctxToSend := uint64ToBigEndianBytes(requestIndex)
+
+ cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ // asynchronously send a raft message
+ err := s.r.ReadIndex(cctx, ctxToSend) // fire and forget; the message only goes into memory here
+ cancel()
+ if err == raft.ErrStopped {
+ return err
+ }
+ if err != nil {
+ lg := s.Logger()
+ lg.Warn("未能从Raft获取读取索引", zap.Error(err))
+ return err
+ }
+ return nil
+}
+
+// linearizeReadNotify prepares a single linearizable read.
+func (s *EtcdServer) linearizeReadNotify(ctx context.Context) error {
+ s.readMu.RLock()
+ nc := s.readNotifier
+ s.readMu.RUnlock()
+
+ select {
+ case s.readwaitc <- struct{}{}: // unblocks linearizableReadLoop so it starts working
+ default:
+ }
+
+ // wait for the read-state notification
+ select {
+ case <-nc.c:
+ return nc.err
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-s.done:
+ return ErrStopped
+ }
+}
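+
+// Illustrative sketch (annotation, not part of the upstream file): a read path would call
+// linearizeReadNotify before touching the MVCC store, so the local state machine has
+// applied at least up to the leader's committed index at the time of the request. The
+// handler name below is an assumption.
+//
+//  func (s *EtcdServer) exampleLinearizableRead(ctx context.Context) error {
+//      if err := s.linearizeReadNotify(ctx); err != nil {
+//          return err // leader change, timeout, or server shutdown
+//      }
+//      // now it is safe to serve the read from the local state machine
+//      return nil
+//  }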
diff --git a/etcd/etcdserver/over_quota.go b/etcd/etcdserver/over_quota.go
new file mode 100644
index 00000000000..784c5731d8f
--- /dev/null
+++ b/etcd/etcdserver/over_quota.go
@@ -0,0 +1,152 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "sync"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+)
+
+const (
+ DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB: the number of bytes the backend may consume before the space quota is exceeded.
+ MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB: the suggested maximum number of bytes for a backend quota; larger quotas may degrade performance.
+)
+
+// Quota represents an arbitrary quota against arbitrary requests. Each request costs some charge; if there is not enough remaining charge, too few resources remain within the quota to apply the request.
+type Quota interface {
+ Available(req interface{}) bool // judges whether the given request fits within the quota
+ Cost(req interface{}) int // computes the charge incurred against the quota for a given request
+ Remaining() int64 // the remaining charge in the quota
+}
+
+type passthroughQuota struct{}
+
+func (*passthroughQuota) Available(interface{}) bool { return true }
+func (*passthroughQuota) Cost(interface{}) int { return 0 }
+func (*passthroughQuota) Remaining() int64 { return 1 }
+
+type backendQuota struct {
+ s *EtcdServer
+ maxBackendBytes int64
+}
+
+const (
+ leaseOverhead = 64 // an estimate of the storage cost of a lease
+ kvOverhead = 256 // an estimate of the cost of storing the metadata for a key
+)
+
+var (
+ quotaLogOnce sync.Once
+ DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes))
+ maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes))
+)
+
+// NewBackendQuota creates a quota layer with the given storage limit.
+func NewBackendQuota(s *EtcdServer, name string) Quota {
+ lg := s.Logger()
+
+ if s.Cfg.QuotaBackendBytes < 0 {
+ quotaLogOnce.Do(func() {
+ lg.Info("禁用后端配额", zap.String("quota-name", name), zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes))
+ })
+ return &passthroughQuota{}
+ }
+
+ if s.Cfg.QuotaBackendBytes == 0 {
+ quotaLogOnce.Do(func() {
+ if lg != nil {
+ lg.Info(
+ "启用后端配置默认值",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", DefaultQuotaBytes),
+ zap.String("quota-size", DefaultQuotaSize),
+ )
+ }
+ })
+ return &backendQuota{s, DefaultQuotaBytes}
+ }
+
+ quotaLogOnce.Do(func() {
+ if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
+ lg.Warn(
+ "配额超过了最大值",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
+ zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes),
+ zap.String("quota-maximum-size", maxQuotaSize),
+ )
+ }
+ lg.Info(
+ "启用配额",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
+ )
+ })
+ return &backendQuota{s, s.Cfg.QuotaBackendBytes}
+}
+
+// Available roughly computes whether the request can still be stored within the quota.
+func (b *backendQuota) Available(v interface{}) bool {
+ return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
+}
+
+// Cost returns the charge of the operation.
+func (b *backendQuota) Cost(v interface{}) int {
+ switch r := v.(type) {
+ case *pb.PutRequest:
+ return costPut(r)
+ case *pb.TxnRequest:
+ return costTxn(r)
+ case *pb.LeaseGrantRequest:
+ return leaseOverhead
+ default:
+ panic("未知的 cost")
+ }
+}
+
+func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
+
+func costTxnReq(u *pb.RequestOp) int {
+ r := u.GetRequestPut()
+ if r == nil {
+ return 0
+ }
+ return costPut(r)
+}
+
+func costTxn(r *pb.TxnRequest) int {
+ sizeSuccess := 0
+ for _, u := range r.Success {
+ sizeSuccess += costTxnReq(u)
+ }
+ sizeFailure := 0
+ for _, u := range r.Failure {
+ sizeFailure += costTxnReq(u)
+ }
+ if sizeFailure > sizeSuccess {
+ return sizeFailure
+ }
+ return sizeSuccess
+}
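+
+// Illustrative arithmetic (annotation, not part of the upstream file): a put with a 3-byte
+// key and a 3-byte value is charged kvOverhead + 3 + 3 = 262 bytes, and a transaction is
+// charged the larger of its success and failure branches, so a txn whose success branch
+// contains two such puts costs 2 * 262 = 524 bytes.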
+
+func (b *backendQuota) Remaining() int64 {
+ return b.maxBackendBytes - b.s.Backend().Size()
+}
diff --git a/etcd/etcdserver/over_raft.go b/etcd/etcdserver/over_raft.go
new file mode 100644
index 00000000000..8ee093cea73
--- /dev/null
+++ b/etcd/etcdserver/over_raft.go
@@ -0,0 +1,673 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "encoding/json"
+ "expvar"
+ "fmt"
+ "log"
+ "sort"
+ "sync"
+ "time"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/config"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/wal"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/logutil"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp"
+ "github.com/ls-2018/etcd_cn/pkg/contention"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+ "go.uber.org/zap"
+)
+
+const (
+ maxSizePerMsg = 1 * 1024 * 1024 // 1M
+ maxInflightMsgs = 4096 / 8 // 512
+)
+
+var (
+ // protects raftStatus
+ raftStatusMu sync.Mutex
+ // indirection for expvar func interface
+ // expvar panics when publishing duplicate name
+ // expvar does not support remove a registered name
+ // so only register a func that calls raftStatus
+ // and change raftStatus as we need.
+ raftStatus func() raft.Status
+)
+
+func init() {
+ expvar.Publish("raft.status", expvar.Func(func() interface{} {
+ raftStatusMu.Lock()
+ defer raftStatusMu.Unlock()
+ if raftStatus == nil {
+ return nil
+ }
+ return raftStatus()
+ }))
+}
+
+// apply contains entries and a snapshot to be applied. Once
+// an apply is consumed, the entries will be persisted to
+// raft storage concurrently; the application must read
+// raftDone before assuming the raft messages are stable.
+type apply struct {
+ entries []raftpb.Entry
+ snapshot raftpb.Snapshot
+ // notifyc synchronizes etcd server applies with the raft node
+ notifyc chan struct{}
+}
+
+type raftNodeConfig struct {
+ lg *zap.Logger
+ isIDRemoved func(id uint64) bool // to check if msg receiver is removed from cluster
+ raft.RaftNodeInterFace
+ raftStorage *raft.MemoryStorage
+ storage Storage
+ heartbeat time.Duration // for logging
+ // transport specifies the transport to send and receive msgs to members.
+ // Sending messages MUST NOT block. It is okay to drop messages, since
+ // clients should timeout and reissue their messages.
+ // If transport is nil, etcd will panic.
+ transport rafthttp.Transporter
+}
+
+func newRaftNode(cfg raftNodeConfig) *raftNode {
+ var lg raft.Logger
+ if cfg.lg != nil {
+ lg = NewRaftLoggerZap(cfg.lg)
+ } else {
+ lcfg := logutil.DefaultZapLoggerConfig
+ var err error
+ lg, err = NewRaftLogger(&lcfg)
+ if err != nil {
+ log.Fatalf("cannot create raft logger %v", err)
+ }
+ }
+ raft.SetLogger(lg)
+ r := &raftNode{
+ lg: cfg.lg,
+ tickMu: new(sync.Mutex),
+ raftNodeConfig: cfg,
+ // set up contention detectors for raft heartbeat message.
+ // expect to send a heartbeat within 2 heartbeat intervals.
+ td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
+ readStateC: make(chan raft.ReadState, 1),
+ msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
+ applyc: make(chan apply),
+ stopped: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ if r.heartbeat == 0 {
+ r.ticker = &time.Ticker{}
+ } else {
+ r.ticker = time.NewTicker(r.heartbeat)
+ }
+ return r
+}
+
+// raftNode wraps the raft state machine and drives its ticks and state transitions.
+type raftNode struct {
+ lg *zap.Logger
+ tickMu *sync.Mutex
+ raftNodeConfig // contains important structures such as the node and storage
+ msgSnapC chan raftpb.Message // a chan to send/receive snapshot
+ applyc chan apply // a chan to send out apply
+ readStateC chan raft.ReadState // channel used to send out ReadState
+ ticker *time.Ticker // raft has two timers, the election timer (Follower/Candidate) and the heartbeat timer (Leader); both are advanced by ticks
+ td *contention.TimeoutDetector // contention detectors for raft heartbeat message
+ stopped chan struct{}
+ done chan struct{}
+}
+
+// startNode starts the raft node.
+func startNode(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.RaftNodeInterFace, s *raft.MemoryStorage, w *wal.WAL) {
+ var err error
+ member := cl.MemberByName(cfg.Name)
+ metadata := pbutil.MustMarshal(
+ &pb.Metadata{
+ NodeID: uint64(member.ID),
+ ClusterID: uint64(cl.ID()),
+ },
+ )
+ if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
+ cfg.Logger.Panic("创建WAL失败", zap.Error(err))
+ }
+ if cfg.UnsafeNoFsync { // unsafe storage; defaults to false
+ w.SetUnsafeNoFsync()
+ }
+ peers := make([]raft.Peer, len(ids))
+ for i, id := range ids {
+ var ctx []byte
+ ctx, err = json.Marshal((*cl).Member(id)) // the member for this id
+ if err != nil {
+ cfg.Logger.Panic("序列化member失败", zap.Error(err))
+ }
+ peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
+ }
+ id = member.ID // local member ID
+ cfg.Logger.Info(
+ "启动本节点",
+ zap.String("local-member-id", id.String()),
+ zap.String("cluster-id", cl.ID().String()),
+ )
+ s = raft.NewMemoryStorage() // create the in-memory storage
+ c := &raft.Config{
+ ID: uint64(id), // local member ID
+ ElectionTick: cfg.ElectionTicks, // how many ticks before an election timeout check fires
+ HeartbeatTick: 1, // how many ticks before a heartbeat check fires
+ Storage: s, // storage, in memory ✅
+ MaxSizePerMsg: maxSizePerMsg, // maximum size of each message sent
+ MaxInflightMsgs: maxInflightMsgs, // 512
+ CheckQuorum: true, // check whether this node is still the leader
+ PreVote: cfg.PreVote, // true // whether to enable the PreVote extension; recommended
+ Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
+ }
+
+ _ = membership.NewClusterFromURLsMap
+ if len(peers) == 0 {
+ // this branch is not taken here
+ n = raft.RestartNode(c) // does not bootstrap peers
+ } else {
+ n = raft.StartNode(c, peers) // ✅✈️ 🚗🚴🏻😁
+ }
+ raftStatusMu.Lock()
+ raftStatus = n.Status
+ raftStatusMu.Unlock()
+ return id, n, s, w
+}
+
+func restartNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.RaftNodeInterFace, *raft.MemoryStorage, *wal.WAL) {
+ var walsnap walpb.Snapshot
+ if snapshot != nil {
+ walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+ }
+ w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
+
+ cfg.Logger.Info(
+ "restarting local member",
+ zap.String("cluster-id", cid.String()),
+ zap.String("local-member-id", id.String()),
+ zap.Uint64("commit-index", st.Commit),
+ )
+ cl := membership.NewCluster(cfg.Logger)
+ cl.SetID(id, cid)
+ s := raft.NewMemoryStorage()
+ if snapshot != nil {
+ s.ApplySnapshot(*snapshot) // restore the state from before the server went down
+ }
+ s.SetHardState(st) // recover the hard state from the persisted storage
+ s.Append(ents) // recover the log entries from the persisted storage
+ c := &raft.Config{
+ ID: uint64(id),
+ ElectionTick: cfg.ElectionTicks, // how many ticks before an election timeout check fires
+ HeartbeatTick: 1, // how many ticks before a heartbeat check fires
+ Storage: s,
+ MaxSizePerMsg: maxSizePerMsg, // maximum size of each message sent
+ MaxInflightMsgs: maxInflightMsgs,
+ CheckQuorum: true,
+ PreVote: cfg.PreVote, // whether to enable PreVote
+ Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
+ }
+
+ n := raft.RestartNode(c)
+ raftStatusMu.Lock()
+ raftStatus = n.Status
+ raftStatusMu.Unlock()
+ return id, cl, n, s, w
+}
+
+func restartAsStandaloneNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.RaftNodeInterFace, *raft.MemoryStorage, *wal.WAL) {
+ var walsnap walpb.Snapshot
+ if snapshot != nil {
+ walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+ }
+ w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
+
+ // discard the previously uncommitted entries
+ for i, ent := range ents {
+ if ent.Index > st.Commit {
+ cfg.Logger.Info(
+ "discarding uncommitted WAL entries",
+ zap.Uint64("entry-index", ent.Index),
+ zap.Uint64("commit-index-from-wal", st.Commit),
+ zap.Int("number-of-discarded-entries", len(ents)-i),
+ )
+ ents = ents[:i]
+ break
+ }
+ }
+
+ // force append the configuration change entries
+ toAppEnts := createConfigChangeEnts(
+ cfg.Logger,
+ getIDs(cfg.Logger, snapshot, ents),
+ uint64(id),
+ st.Term,
+ st.Commit,
+ )
+ ents = append(ents, toAppEnts...)
+
+ // force commit newly appended entries
+ err := w.Save(raftpb.HardState{}, toAppEnts)
+ if err != nil {
+ cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
+ }
+ if len(ents) != 0 {
+ st.Commit = ents[len(ents)-1].Index
+ }
+
+ cfg.Logger.Info(
+ "forcing restart member",
+ zap.String("cluster-id", cid.String()),
+ zap.String("local-member-id", id.String()),
+ zap.Uint64("commit-index", st.Commit),
+ )
+
+ cl := membership.NewCluster(cfg.Logger)
+ cl.SetID(id, cid)
+ s := raft.NewMemoryStorage()
+ if snapshot != nil {
+ s.ApplySnapshot(*snapshot) // restore the state from before the server went down
+ }
+ s.SetHardState(st) // recover the hard state from the persisted storage
+ s.Append(ents) // recover the log entries from the persisted storage
+ c := &raft.Config{
+ ID: uint64(id),
+ ElectionTick: cfg.ElectionTicks, // how many ticks before an election timeout check fires
+ HeartbeatTick: 1, // how many ticks before a heartbeat check fires
+ Storage: s,
+ MaxSizePerMsg: maxSizePerMsg, // maximum size of each message sent
+ MaxInflightMsgs: maxInflightMsgs,
+ CheckQuorum: true,
+ PreVote: cfg.PreVote, // whether to enable PreVote
+ Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
+ }
+
+ n := raft.RestartNode(c)
+ raftStatus = n.Status
+ return id, cl, n, s, w
+}
+
+// getIDs returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain three kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+// - ConfChangeAddLearnerNode, in which the contained ID will be added into the set.
+func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+ ids := make(map[uint64]bool)
+ if snap != nil {
+ for _, id := range snap.Metadata.ConfState.Voters {
+ ids[id] = true
+ }
+ }
+ for _, e := range ents {
+ if e.Type != raftpb.EntryConfChange {
+ continue
+ }
+ var cc raftpb.ConfChangeV1
+ pbutil.MustUnmarshal(&cc, e.Data)
+ switch cc.Type {
+ case raftpb.ConfChangeAddLearnerNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeAddNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeRemoveNode:
+ delete(ids, cc.NodeID)
+ case raftpb.ConfChangeUpdateNode:
+ // do nothing
+ default:
+ lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
+ }
+ }
+ sids := make(types.Uint64Slice, 0, len(ids))
+ for id := range ids {
+ sids = append(sids, id)
+ }
+ sort.Sort(sids)
+ return []uint64(sids)
+}
+
+// createConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
+func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+ found := false
+ for _, id := range ids {
+ if id == self {
+ found = true
+ }
+ }
+
+ var ents []raftpb.Entry
+ next := index + 1
+
+ // NB: always add self first, then remove other nodes. Raft will panic if the
+ // set of voters ever becomes empty.
+ if !found {
+ m := membership.Member{
+ ID: types.ID(self),
+ RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
+ }
+ ctx, err := json.Marshal(m)
+ if err != nil {
+ lg.Panic("failed to marshal member", zap.Error(err))
+ }
+ cc := &raftpb.ConfChangeV1{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: self,
+ Context: string(ctx),
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc), // ok
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ for _, id := range ids {
+ if id == self {
+ continue
+ }
+ cc := &raftpb.ConfChangeV1{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ _ = cc.Marshal
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc), // ok
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ return ents
+}
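+
+// NOTE: illustrative sketch only, not part of the original code path; the helper name
+// exampleForceSingleMemberEntries is hypothetical. It shows how getIDs and
+// createConfigChangeEnts are combined when a member is forced to restart standalone:
+// gather the voter IDs known from the snapshot and WAL entries, then emit conf-change
+// entries that remove every member except `self` (adding `self` first if it is missing,
+// so the voter set never becomes empty).
+func exampleForceSingleMemberEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry, self uint64, term, index uint64) []raftpb.Entry {
+ ids := getIDs(lg, snap, ents) // voters recorded in the snapshot and conf-change entries
+ return createConfigChangeEnts(lg, ids, self, term, index) // entries that shrink the cluster down to `self`
+}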
+
+// raft.RaftNodeInterFace: the raft package has no internal lock, so tick serializes Tick calls with tickMu.
+func (r *raftNode) tick() {
+ r.tickMu.Lock()
+ r.Tick()
+ r.tickMu.Unlock()
+}
+
+// start runs the raft event loop. It is driven by the heartbeat/election ticker and by Ready(); this is the core of the server.
+func (r *raftNode) start(rh *raftReadyHandler) {
+ internalTimeout := time.Second
+
+ go func() {
+ defer r.onStop()
+ islead := false
+
+ for {
+ select {
+ case <-r.ticker.C: // advance the heartbeat/election timers
+ r.tick()
+ // readyc = n.readyc, an unbuffered channel
+ case rd := <-r.Ready(): // fetch the next Ready from the channel returned by Node.Ready()
+ // Take the CommittedEntries from the Ready and hand them to the apply module, which applies them to the backend store.
+ if rd.SoftState != nil {
+ // handling when SoftState is non-empty
+ newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
+ rh.updateLead(rd.SoftState.Lead)
+ islead = rd.RaftState == raft.StateLeader
+ rh.updateLeadership(newLeader)
+ r.td.Reset()
+ }
+ // handling when ReadStates is non-empty: the signal for linearizable reads
+ if len(rd.ReadStates) != 0 {
+ select {
+ case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
+ case <-time.After(internalTimeout):
+ r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
+ case <-r.stopped:
+ return
+ }
+ }
+
+ // build the apply request
+ notifyc := make(chan struct{}, 1)
+ ap := apply{
+ entries: rd.CommittedEntries,
+ snapshot: rd.Snapshot,
+ notifyc: notifyc,
+ }
+ // update EtcdServer's cached commit index to the latest value
+ updateCommittedIndex(&ap, rh)
+
+ select {
+ case r.applyc <- ap: // hand the committed entries to the apply layer / state machine
+ case <-r.stopped:
+ return
+ }
+
+ // if this node is the leader, send the messages to the followers
+ if islead {
+ // once this Ready has been received, raft calls acceptReady and clears rn.raft.msgs
+ r.transport.Send(r.processMessages(rd.Messages))
+ }
+
+ // if the Ready carries a snapshot
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // gofail: var raftBeforeSaveSnap struct{}
+ if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
+ r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
+ }
+ // gofail: var raftAfterSaveSnap struct{}
+ }
+
+ // persist the HardState and log entries to the WAL
+ if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
+ r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
+ }
+ // gofail: var raftAfterSave struct{}
+
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // Force WAL to fsync its hard state before Release() releases
+ // old data from the WAL. Otherwise could get an error like:
+ // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost?
+ // See https://github.com/etcd-io/etcd/issues/10219 for more details.
+ if err := r.storage.Sync(); err != nil { // force the WAL to fsync to disk
+ r.lg.Fatal("failed to sync Raft snapshot", zap.Error(err))
+ }
+
+ // etcdserver now claims the snapshot has been persisted onto the disk
+ notifyc <- struct{}{}
+
+ // gofail: var raftBeforeApplySnap struct{}
+ r.raftStorage.ApplySnapshot(rd.Snapshot) // apply the snapshot to the in-memory raft storage
+ r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
+ // gofail: var raftAfterApplySnap struct{}
+
+ if err := r.storage.Release(rd.Snapshot); err != nil {
+ r.lg.Fatal("failed to release Raft wal", zap.Error(err))
+ }
+ // gofail: var raftAfterWALRelease struct{}
+ }
+
+ r.raftStorage.Append(rd.Entries) // append the new entries to the in-memory raft storage
+
+ if !islead {
+ // wrap the messages into the transport format; this also applies timeout control
+ msgs := r.processMessages(rd.Messages)
+
+ // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
+ notifyc <- struct{}{}
+
+ // Candidate or follower needs to wait for all pending configuration
+ // changes to be applied before sending messages.
+ // Otherwise we might incorrectly count votes (e.g. votes from removed members).
+ // Also a slow machine's follower raft-layer could proceed to become the leader
+ // on its own single-node cluster, before the apply-layer applies the config change.
+ // We simply wait for ALL pending entries to be applied for now.
+ // We might improve this later on if it causes unnecessary long blocking issues.
+ waitApply := false
+ for _, ent := range rd.CommittedEntries {
+ if ent.Type == raftpb.EntryConfChange {
+ waitApply = true
+ break
+ }
+ }
+ if waitApply {
+ // blocks until 'applyAll' calls 'applyWait.Trigger'
+ // to be in sync with the scheduled config-change job
+ // (assume notifyc has cap of 1)
+ select {
+ case notifyc <- struct{}{}:
+ case <-r.stopped:
+ return
+ }
+ }
+ // send the response messages back to the peers
+ r.transport.Send(msgs)
+ } else {
+ // leader already processed 'MsgSnap' and signaled
+ notifyc <- struct{}{}
+ }
+ // Advance the raft module's applied index and move entries from unstable to stable storage.
+ // Note that applying committed entries to the state machine happens asynchronously; once the apply finishes, the result is written to the channel the client registered when it called in, completing one full write.
+ r.Advance()
+ case <-r.stopped:
+ return
+ }
+ }
+ }()
+}
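+
+// NOTE: illustrative sketch only, not part of the original file. It restates the
+// canonical Ready-handling order described in the raft package documentation, which
+// the loop above follows with one optimization: the leader may send its messages
+// before the disk writes complete, while followers must persist first. saveToStorage,
+// send and applyToStateMachine are placeholder names.
+//
+//	for {
+//		rd := <-n.Ready()
+//		saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)  // persist before acknowledging
+//		send(rd.Messages)                                     // leaders may overlap this with the save
+//		applyToStateMachine(rd.Snapshot, rd.CommittedEntries) // hand to the apply layer
+//		n.Advance()                                           // tell raft this Ready has been consumed
+//	}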
+
+func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
+ var ci uint64
+ if len(ap.entries) != 0 {
+ ci = ap.entries[len(ap.entries)-1].Index
+ }
+ if ap.snapshot.Metadata.Index > ci {
+ ci = ap.snapshot.Metadata.Index
+ }
+ if ci != 0 {
+ rh.updateCommittedIndex(ci)
+ }
+}
+
+// processMessages wraps the messages into the transport format and applies timeout control (deduplicates MsgAppResp, redirects MsgSnap, observes heartbeat latency).
+func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
+ sentAppResp := false
+ for i := len(ms) - 1; i >= 0; i-- {
+ if r.isIDRemoved(ms[i].To) {
+ ms[i].To = 0
+ }
+
+ if ms[i].Type == raftpb.MsgAppResp {
+ if sentAppResp {
+ ms[i].To = 0
+ } else {
+ sentAppResp = true
+ }
+ }
+
+ if ms[i].Type == raftpb.MsgSnap {
+ // There are two separate data stores: the store for v2, and the KV for v3.
+ // The msgSnap only contains the most recent snapshot of the store, without the KV.
+ // So we need to redirect the msgSnap to the etcd main loop for merging in the
+ // current store snapshot and KV snapshot.
+ select {
+ case r.msgSnapC <- ms[i]:
+ default:
+ // drop msgSnap if the inflight chan is full.
+ }
+ ms[i].To = 0
+ }
+ if ms[i].Type == raftpb.MsgHeartbeat {
+ ok, exceed := r.td.Observe(ms[i].To)
+ if !ok {
+ // TODO: limit request rate.
+ r.lg.Warn(
+ "leader未能按时发出心跳,时间太长,可能是因为磁盘慢而过载",
+ zap.String("to", fmt.Sprintf("%x", ms[i].To)),
+ zap.Duration("heartbeat-interval", r.heartbeat),
+ zap.Duration("expected-duration", 2*r.heartbeat),
+ zap.Duration("exceeded-duration", exceed),
+ )
+ }
+ }
+ }
+ return ms
+}
+
+func (r *raftNode) apply() chan apply {
+ return r.applyc
+}
+
+func (r *raftNode) stop() {
+ r.stopped <- struct{}{}
+ <-r.done
+}
+
+func (r *raftNode) onStop() {
+ r.Stop()
+ r.ticker.Stop()
+ r.transport.Stop()
+ if err := r.storage.Close(); err != nil {
+ r.lg.Panic("failed to close Raft storage", zap.Error(err))
+ }
+ close(r.done)
+}
+
+// for testing
+func (r *raftNode) pauseSending() {
+ p := r.transport.(rafthttp.Pausable)
+ p.Pause()
+}
+
+func (r *raftNode) resumeSending() {
+ p := r.transport.(rafthttp.Pausable)
+ p.Resume()
+}
+
+// advanceTicks advances ticks of Raft node.
+// This can be used for fast-forwarding election
+// ticks in multi data-center deployments, thus
+// speeding up election process.
+func (r *raftNode) advanceTicks(ticks int) {
+ for i := 0; i < ticks; i++ {
+ r.tick()
+ }
+}
+
+// Demo: raftNode does not implement this method itself; it is promoted from the embedded interface.
+func (r *raftNode) Demo() {
+ _ = r.raftNodeConfig.RaftNodeInterFace
+ // two levels of anonymous embedding; the embedded field is an interface
+ _ = r.Step
+ // var _ raft.RaftNodeInterFace = raftNode{}
+}
diff --git a/etcd/etcdserver/over_v3service_cluster.go b/etcd/etcdserver/over_v3service_cluster.go
new file mode 100644
index 00000000000..bd5897bef22
--- /dev/null
+++ b/etcd/etcdserver/over_v3service_cluster.go
@@ -0,0 +1,189 @@
+package etcdserver
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+ "go.uber.org/zap"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+)
+
+// LinearizableReadNotify performs a linearizable read barrier.
+func (s *EtcdServer) LinearizableReadNotify(ctx context.Context) error {
+ return s.linearizeReadNotify(ctx)
+}
+
+// AddMember ok
+func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ b, err := json.Marshal(memb)
+ if err != nil {
+ return nil, err
+ }
+
+ lg := s.Logger()
+ // StrictReconfigCheck is enabled by default; reject new members while the cluster is unhealthy.
+ if s.Cfg.StrictReconfigCheck {
+ // protect the quorum when adding a voting member
+ if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() {
+ lg.Warn("rejecting member add request; not enough healthy members", zap.String("local-member-id", s.ID().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
+ zap.Error(ErrNotEnoughStartedMembers),
+ )
+ return nil, ErrNotEnoughStartedMembers
+ }
+ // has this member been connected to all peers since HealthInterval ago?
+ if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) {
+ lg.Warn(
+ "rejecting member add request; local member has not been connected to all peers, reconfiguration would break the active quorum",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
+ zap.Error(ErrUnhealthy),
+ )
+ return nil, ErrUnhealthy
+ }
+ }
+ cc := raftpb.ConfChangeV1{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: uint64(memb.ID),
+ Context: string(b),
+ }
+
+ if memb.IsLearner {
+ cc.Type = raftpb.ConfChangeAddLearnerNode
+ }
+
+ return s.configureAndSendRaft(ctx, cc)
+}
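+
+// NOTE: illustrative usage sketch, not part of this file; it assumes the upstream
+// clientv3 API and placeholder endpoints/peer URLs. The safer reconfiguration flow
+// that the checks above protect is: add the new member as a learner first, let it
+// catch up, then promote it to a voting member.
+//
+//	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
+//	if err != nil { /* handle error */ }
+//	addResp, err := cli.MemberAddAsLearner(ctx, []string{"http://10.0.0.4:2380"})
+//	if err != nil { /* handle error */ }
+//	// later, once the learner has caught up:
+//	_, err = cli.MemberPromote(ctx, addResp.Member.ID)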
+
+// RemoveMember ok
+func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ if err := s.mayRemoveMember(types.ID(id)); err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChangeV1{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ return s.configureAndSendRaft(ctx, cc)
+}
+
+// UpdateMember ok
+func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ b, merr := json.Marshal(memb)
+ if merr != nil {
+ return nil, merr
+ }
+
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+ cc := raftpb.ConfChangeV1{
+ Type: raftpb.ConfChangeUpdateNode,
+ NodeID: uint64(memb.ID),
+ Context: string(b),
+ }
+ return s.configureAndSendRaft(ctx, cc)
+}
+
+// PromoteMember promotes a learner node to a voting member.
+func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ // only the raft leader has enough information to know whether the learner is ready.
+ resp, err := s.promoteMember(ctx, id) // succeeds only on the leader; the conf change goes through raft there
+ if err == nil {
+ return resp, nil
+ }
+ if err != ErrNotLeader {
+ return resp, err
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+ // forward to the leader
+ for cctx.Err() == nil {
+ leader, err := s.waitLeader(cctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, url := range leader.PeerURLs {
+ resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt)
+ if err == nil {
+ return resp, nil
+ }
+ if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner {
+ return nil, err
+ }
+ }
+ }
+
+ if cctx.Err() == context.DeadlineExceeded {
+ return nil, ErrTimeout
+ }
+ return nil, ErrCanceled
+}
+
+// promoteMember
+func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ if err := s.mayPromoteMember(types.ID(id)); err != nil {
+ return nil, err
+ }
+
+ promoteChangeContext := membership.ConfigChangeContext{
+ Member: membership.Member{
+ ID: types.ID(id),
+ },
+ IsPromote: true,
+ }
+
+ b, err := json.Marshal(promoteChangeContext)
+ if err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChangeV1{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: id,
+ Context: string(b),
+ }
+
+ return s.configureAndSendRaft(ctx, cc)
+}
+
+// OK
+func (s *EtcdServer) mayPromoteMember(id types.ID) error {
+ lg := s.Logger()
+ err := s.isLearnerReady(uint64(id)) // check whether the learner has caught up to at least 90% of the leader's data
+ if err != nil {
+ return err
+ }
+
+ if !s.Cfg.StrictReconfigCheck { // strict reconfiguration check
+ return nil
+ }
+ if !s.cluster.IsReadyToPromoteMember(uint64(id)) {
+ lg.Warn("拒绝成员提升申请;健康成员个数不足", zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-remove-id", id.String()),
+ zap.Error(ErrNotEnoughStartedMembers),
+ )
+ return ErrNotEnoughStartedMembers
+ }
+
+ return nil
+}
diff --git a/etcd/etcdserver/over_v3service_kv.go b/etcd/etcdserver/over_v3service_kv.go
new file mode 100644
index 00000000000..2e98ace943f
--- /dev/null
+++ b/etcd/etcdserver/over_v3service_kv.go
@@ -0,0 +1,149 @@
+package etcdserver
+
+import (
+ "context"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+)
+
+type RaftKV interface {
+ Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
+ Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
+ DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
+ Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
+ Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
+}
+
+func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if isTxnReadonly(r) {
+ trace := traceutil.New("transaction", s.Logger(), traceutil.Field{Key: "read_only", Value: true})
+ ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
+ if !isTxnSerializable(r) {
+ err := s.linearizeReadNotify(ctx)
+ trace.Step("在线性读之前,保持raft节点间的一致性")
+ if err != nil {
+ return nil, err
+ }
+ }
+ var resp *pb.TxnResponse
+ var err error
+ chk := func(ai *auth.AuthInfo) error {
+ return checkTxnAuth(s.authStore, ai, r)
+ }
+
+ get := func() { resp, _, err = s.applyV3Base.Txn(ctx, r) }
+ if serr := s.doSerialize(ctx, chk, get); serr != nil {
+ return nil, serr
+ }
+ return resp, err
+ }
+
+ ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.TxnResponse), nil
+}
+
+func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.DeleteRangeResponse), nil
+}
+
+// Compact compacts away old revisions of the KV history.
+func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+ startTime := time.Now()
+ result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
+ trace := traceutil.TODO()
+ if result != nil && result.trace != nil {
+ trace = result.trace
+ applyStart := result.trace.GetStartTime()
+ result.trace.SetStartTime(startTime)
+ trace.InsertStep(0, applyStart, "process raft request")
+ }
+ if r.Physical && result != nil && result.physc != nil {
+ <-result.physc
+ // The compaction has finished and the keys have been deleted; the hash is now settled, but the data is not necessarily committed.
+ // If a crash happens and the compaction is resumed, the hash could revert to the value from before the compaction completed.
+ // Force the finished compaction to commit so it is not resumed after a crash.
+ s.backend.ForceCommit()
+ trace.Step("physical compaction")
+ }
+ if err != nil {
+ return nil, err
+ }
+ if result.err != nil {
+ return nil, result.err
+ }
+ resp := result.resp.(*pb.CompactionResponse)
+ if resp == nil {
+ resp = &pb.CompactionResponse{}
+ }
+ if resp.Header == nil {
+ resp.Header = &pb.ResponseHeader{}
+ }
+ resp.Header.Revision = s.kv.Rev()
+ trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
+ return resp, nil
+}
+
+// RaftRequest is a helper the author added for manual testing.
+func (s *EtcdServer) RaftRequest(ctx context.Context, r pb.InternalRaftRequest) {
+ s.raftRequest(ctx, r)
+}
+
+func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ trace := traceutil.New("range", s.Logger(), traceutil.Field{Key: "range_begin", Value: string(r.Key)}, traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)})
+ ctx = context.WithValue(ctx, traceutil.TraceKey, trace) // trace
+ var resp *pb.RangeResponse
+ var err error
+ defer func(start time.Time) {
+ if resp != nil {
+ trace.AddField(
+ traceutil.Field{Key: "response_count", Value: len(resp.Kvs)},
+ traceutil.Field{Key: "response_revision", Value: resp.Header.Revision},
+ )
+ }
+ }(time.Now())
+ // If a linearizable read is requested, run linearizeReadNotify,
+ // which blocks here until apply index >= read index.
+ if !r.Serializable {
+ err = s.linearizeReadNotify(ctx) // send the ready signal and wait for the result
+ trace.Step("agreement among raft nodes before linearized reading")
+ if err != nil {
+ return nil, err
+ }
+ }
+ // A serializable read returns the local node's data directly to the client; it does not guarantee that the returned data is the latest.
+ chk := func(ai *auth.AuthInfo) error {
+ return s.authStore.IsRangePermitted(ai, []byte(r.Key), []byte(r.RangeEnd)) // health,nil
+ }
+
+ get := func() {
+ _ = applierV3backend{}
+ // Reaching this point means the read request's apply index >= read index,
+ // so it is safe to read from bbolt.
+ resp, err = s.applyV3Base.Range(ctx, nil, r)
+ }
+ if serr := s.doSerialize(ctx, chk, get); serr != nil {
+ err = serr
+ return nil, err
+ }
+ return resp, err
+}
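+
+// NOTE: illustrative usage sketch, not part of this file; it assumes the upstream
+// clientv3 API and a placeholder endpoint. It shows the client-side choice that
+// decides which branch of Range above is taken: a default Get is linearizable
+// (waits for the read barrier), while WithSerializable() reads local data only.
+//
+//	cli, _ := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
+//	linearizable, _ := cli.Get(ctx, "foo")                           // r.Serializable == false
+//	localOnly, _ := cli.Get(ctx, "foo", clientv3.WithSerializable()) // r.Serializable == true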
+
+// Put OK
+func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.PutResponse), nil
+}
diff --git a/etcd/etcdserver/over_v3service_lease.go b/etcd/etcdserver/over_v3service_lease.go
new file mode 100644
index 00000000000..bc8ad1705d4
--- /dev/null
+++ b/etcd/etcdserver/over_v3service_lease.go
@@ -0,0 +1,141 @@
+package etcdserver
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/lease/leasehttp"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+// Lessor covers lease operations: creating, revoking and renewing leases,
+// retrieving lease information, and listing all existing leases.
+type Lessor interface {
+ LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) // create a lease
+ LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) // revoke a lease
+ LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) // renew a lease
+ LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) // retrieve lease information
+ LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) // list all leases
+}
+
+// LeaseGrant creates a lease.
+func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ // no lease ID was provided; generate one
+ for r.ID == int64(lease.NoLease) {
+ // only use positive int64 ids
+ r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
+ }
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{LeaseGrant: r})
+ fmt.Println("LeaseGrant--->:", resp)
+
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.LeaseGrantResponse), nil
+}
+
+// LeaseRevoke revokes a lease.
+func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.LeaseRevokeResponse), nil
+}
+
+// LeaseRenew renews a lease.
+func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
+ ttl, err := s.lessor.Renew(id) // succeeds only if this node is the primary lessor (the leader)
+ if err == nil {
+ return ttl, nil
+ }
+ if err != lease.ErrNotPrimary {
+ return -1, err
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ // renew does not go through raft; forward to the leader manually
+ for cctx.Err() == nil && err != nil {
+ leader, lerr := s.waitLeader(cctx)
+ if lerr != nil {
+ return -1, lerr
+ }
+ for _, url := range leader.PeerURLs {
+ lurl := url + leasehttp.LeasePrefix
+ ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
+ if err == nil || err == lease.ErrLeaseNotFound {
+ return ttl, err
+ }
+ }
+ // Throttle in case of e.g. connection problems.
+ time.Sleep(50 * time.Millisecond)
+ }
+
+ if cctx.Err() == context.DeadlineExceeded {
+ return -1, ErrTimeout
+ }
+ return -1, ErrCanceled
+}
+
+// LeaseTimeToLive retrieves lease information.
+func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ if s.Leader() == s.ID() {
+ le := s.lessor.Lookup(lease.LeaseID(r.ID))
+ if le == nil {
+ return nil, lease.ErrLeaseNotFound
+ }
+ resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
+ if r.Keys {
+ ks := le.Keys()
+ kbs := make([][]byte, len(ks))
+ for i := range ks {
+ kbs[i] = []byte(ks[i])
+ }
+ resp.Keys = kbs
+ }
+ return resp, nil
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ // forward to the leader
+ for cctx.Err() == nil {
+ leader, err := s.waitLeader(cctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, url := range leader.PeerURLs {
+ lurl := url + leasehttp.LeaseInternalPrefix // /leases/internal
+ resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
+ if err == nil {
+ return resp.LeaseTimeToLiveResponse, nil
+ }
+ if err == lease.ErrLeaseNotFound {
+ return nil, err
+ }
+ }
+ }
+
+ if cctx.Err() == context.DeadlineExceeded {
+ return nil, ErrTimeout
+ }
+ return nil, ErrCanceled
+}
+
+// LeaseLeases lists all leases.
+func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ ls := s.lessor.Leases() // fetch all leases on this node
+ lss := make([]*pb.LeaseStatus, len(ls))
+ for i := range ls {
+ lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
+ }
+ return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
+}
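+
+// NOTE: illustrative usage sketch, not part of this file; it assumes the upstream
+// clientv3 API and placeholder endpoint/key names. It exercises the Lessor methods
+// above: grant a lease, attach a key to it, inspect it, and keep it alive.
+//
+//	cli, _ := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
+//	grant, _ := cli.Grant(ctx, 10)                                       // LeaseGrant, TTL 10s
+//	cli.Put(ctx, "foo", "bar", clientv3.WithLease(grant.ID))             // bind the key to the lease
+//	ttl, _ := cli.TimeToLive(ctx, grant.ID, clientv3.WithAttachedKeys()) // LeaseTimeToLive
+//	_ = ttl
+//	ch, _ := cli.KeepAlive(ctx, grant.ID)                                // periodic LeaseRenew on the leader
+//	<-ch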
diff --git a/etcd/etcdserver/server.go b/etcd/etcdserver/server.go
new file mode 100644
index 00000000000..da96cb54a5c
--- /dev/null
+++ b/etcd/etcdserver/server.go
@@ -0,0 +1,2229 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "encoding/json"
+ "expvar"
+ "fmt"
+ "math"
+ "math/rand"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/coreos/go-semver/semver"
+ humanize "github.com/dustin/go-humanize"
+ "github.com/ls-2018/etcd_cn/etcd/config"
+ "go.uber.org/zap"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2discovery"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+ stats "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2stats"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3alarm"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3compactor"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex"
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/wal"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/idutil"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/pkg/runtime"
+ "github.com/ls-2018/etcd_cn/pkg/schedule"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+ "github.com/ls-2018/etcd_cn/pkg/wait"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+)
+
+const (
+ DefaultSnapshotCount = 100000
+
+ // DefaultSnapshotCatchUpEntries is the number of entries for a slow follower
+ // to catch-up after compacting the raft storage entries.
+ // We expect the follower has a millisecond level latency with the leader.
+ // The max throughput is around 10K. Keep a 5K entries is enough for helping
+ // follower to catch up.
+ DefaultSnapshotCatchUpEntries uint64 = 5000
+
+ StoreClusterPrefix = "/0"
+ StoreKeysPrefix = "/1"
+
+ // HealthInterval is the minimum time the cluster should be healthy
+ // before accepting add member requests.
+ HealthInterval = 5 * time.Second
+
+ purgeFileInterval = 30 * time.Second
+
+ // max number of in-flight snapshot messages etcdserver allows to have
+ // This number is more than enough for most clusters with 5 machines.
+ maxInFlightMsgSnap = 16
+
+ releaseDelayAfterSnapshot = 30 * time.Second
+
+ // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
+ maxPendingRevokes = 16
+
+ recommendedMaxRequestBytes = 10 * 1024 * 1024 // 10M
+
+ readyPercent = 0.9
+
+ DowngradeEnabledPath = "/downgrade/enabled"
+)
+
+var (
+ // monitorVersionInterval should be smaller than the timeout
+ // on the connection. Otherwise we will not be able to reuse the connection
+ // (since it will timeout).
+ monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
+
+ recommendedMaxRequestBytesString = humanize.Bytes(uint64(recommendedMaxRequestBytes))
+ storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+
+ expvar.Publish(
+ "file_descriptor_limit",
+ expvar.Func(
+ func() interface{} {
+ n, _ := runtime.FDLimit()
+ return n
+ },
+ ),
+ )
+}
+
+type Response struct {
+ Term uint64
+ Index uint64
+ Event *v2store.Event
+ Watcher v2store.Watcher
+ Err error
+}
+
+type ServerV2 interface {
+ Server
+ Leader() types.ID
+ // Do takes a V2 request and attempts to fulfill it, returning a Response.
+ Do(ctx context.Context, r pb.Request) (Response, error)
+ stats.Stats
+ ClientCertAuthEnabled() bool
+}
+
+type ServerV3 interface {
+ Server
+ RaftStatusGetter
+}
+
+func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
+
+type Server interface {
+ AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) // HTTP: add a member
+ RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) // HTTP: remove a member
+ UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) // HTTP: update a member
+ PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) // HTTP: promote a member
+ ClusterVersion() *semver.Version //
+ Cluster() api.Cluster // returns the internal cluster structure
+ Alarms() []*pb.AlarmMember //
+ LeaderChangedNotify() <-chan struct{} // leader-change notification
+ // 1. The returned channel is closed when the leadership changes.
+ // 2. A new channel therefore has to be obtained for each term.
+ // 3. Users may miss some consecutive channel changes when using this API.
+}
+
+// EtcdServer is the entry point for the whole etcd node's functionality and holds most of the members the node needs while running.
+type EtcdServer struct {
+ inflightSnapshots int64 // number of snapshots currently being sent
+ appliedIndex uint64 // index of the last log entry applied to the state machine
+ committedIndex uint64 // committed log index, i.e. the index the leader has confirmed a quorum of members have replicated
+ term uint64
+ lead uint64
+ consistIndex cindex.ConsistentIndexer // index already persisted to the kvstore
+ r raftNode // important structure holding the raft state machine
+ readych chan struct{} // closed once the server has started and registered itself with the cluster
+ Cfg config.ServerConfig // configuration
+ lgMu *sync.RWMutex
+ lg *zap.Logger
+ w wait.Wait // lets callers of synchronous requests block until the result is available
+ readMu sync.RWMutex // the following three fields are used to implement linearizable reads
+ readwaitc chan struct{} // an empty struct sent on readwaitc tells the server that a read is waiting
+ readNotifier *notifier // notifies the read goroutine that it may serve the request when there is no error
+
+ stop chan struct{} // stop channel
+ stopping chan struct{} // closed when the server is stopping
+ done chan struct{} // closed when the loop in the server's run function exits
+ leaderChanged chan struct{} // after a leader change, tells the linearizable read loop to drop stale read requests
+ leaderChangedMu sync.RWMutex //
+ errorc chan error // error channel for unrecoverable errors that shut down the raft state machine
+ id types.ID // this etcd member's ID
+ attributes membership.Attributes // this etcd member's attributes
+ cluster *membership.RaftCluster // cluster information
+ v2store v2store.Store // v2 KV store
+ snapshotter *snap.Snapshotter // used for snapshots
+ applyV2 ApplierV2 // v2 applier, applies committed entries to the v2 store
+ applyV3 applierV3 // v3 applier, applies committed entries to the v3 state machine
+ applyV3Base applierV3 // applyV3 with auth and quota checks stripped
+ applyV3Internal applierV3Internal // internal v3 applier
+ applyWait wait.WaitTime // apply wait list; lets callers wait until the entry at a given index has been applied
+ kv mvcc.WatchableKV // v3 KV store
+ lessor lease.Lessor // used by v3 to implement lease expiry
+ backendLock sync.Mutex // guards the backend; taken when swapping or fetching the backend store
+ backend backend.Backend // backend store (bolt db)
+ beHooks *backendHooks // backend hooks
+ authStore auth.AuthStore // stores auth data
+ alarmStore *v3alarm.AlarmStore // stores alarm data
+ stats *stats.ServerStats // this node's stats
+ lstats *stats.LeaderStats // leader stats
+ SyncTicker *time.Ticker // used by v2 to expire TTL data
+ compactor v3compactor.Compactor // periodic compaction task
+ peerRt http.RoundTripper // used to send requests to peers
+ reqIDGen *idutil.Generator // generates request IDs
+ // wgMu blocks concurrent waitgroup mutation while etcd is stopping
+ wgMu sync.RWMutex
+ // wg is used to wait for the goroutines that depend on the etcd state
+ // to exit when stopping the etcd server.
+ wg sync.WaitGroup
+ ctx context.Context // context for requests initiated by etcd that may need to be cancelled when the server shuts down
+ cancel context.CancelFunc
+ leadTimeMu sync.RWMutex
+ leadElectedTime time.Time
+ firstCommitInTermMu sync.RWMutex
+ firstCommitInTermC chan struct{} // recreated each term; signalled on the term's first commit
+ *AccessController
+}
+
+// backend storage hooks
+type backendHooks struct {
+ indexer cindex.ConsistentIndexer // consistent-index storage
+ lg *zap.Logger
+ confState raftpb.ConfState // the cluster's current configuration
+ // The first write marks it dirty. It is false by default, so an
+ // uninitialized `confState` is meaningless.
+ confStateDirty bool
+ confStateLock sync.Mutex
+}
+
+func (bh *backendHooks) OnPreCommitUnsafe(tx backend.BatchTx) {
+ bh.indexer.UnsafeSave(tx)
+ bh.confStateLock.Lock()
+ defer bh.confStateLock.Unlock()
+ if bh.confStateDirty {
+ membership.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
+ // save bh.confState
+ bh.confStateDirty = false
+ }
+}
+
+func (bh *backendHooks) SetConfState(confState *raftpb.ConfState) {
+ bh.confStateLock.Lock()
+ defer bh.confStateLock.Unlock()
+ bh.confState = *confState
+ bh.confStateDirty = true
+}
+
+type Temp struct {
+ Bepath string
+ W *wal.WAL
+ N raft.RaftNodeInterFace
+ S *raft.MemoryStorage
+ ID types.ID
+ CL *membership.RaftCluster
+ Remotes []*membership.Member
+ Snapshot *raftpb.Snapshot
+ Prt http.RoundTripper
+ SS *snap.Snapshotter
+ ST v2store.Store
+ CI cindex.ConsistentIndexer
+ BeExist bool
+ BeHooks *backendHooks
+ BE backend.Backend
+}
+
+func MySelfStartRaft(cfg config.ServerConfig) (temp *Temp, err error) {
+ temp = &Temp{}
+ temp.ST = v2store.New(StoreClusterPrefix, StoreKeysPrefix) // create a v2 store rooted at /0 and /1
+
+ if cfg.MaxRequestBytes > recommendedMaxRequestBytes { // 10M
+ cfg.Logger.Warn(
+ "超过了建议的请求限度",
+ zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
+ zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
+ zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
+ zap.String("recommended-request-size", recommendedMaxRequestBytesString),
+ )
+ }
+ // it is fine if the directory already exists
+ if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
+ return nil, fmt.Errorf("cannot access data directory: %v", terr)
+ }
+
+ haveWAL := wal.Exist(cfg.WALDir()) // default.etcd/member/wal
+ // default.etcd/member/snap
+ if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
+ cfg.Logger.Fatal(
+ "创建快照目录失败",
+ zap.String("path", cfg.SnapDir()),
+ zap.Error(err),
+ )
+ }
+ // remove files whose names match the pattern
+ if err = fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
+ return strings.HasPrefix(fileName, "tmp")
+ }); err != nil {
+ cfg.Logger.Error(
+ "删除快照目录下的临时文件",
+ zap.String("path", cfg.SnapDir()),
+ zap.Error(err),
+ )
+ }
+ // create the snapshotter
+ temp.SS = snap.New(cfg.Logger, cfg.SnapDir())
+
+ temp.Bepath = cfg.BackendPath() // default.etcd/member/snap/db
+ temp.BeExist = fileutil.Exist(temp.Bepath)
+
+ temp.CI = cindex.NewConsistentIndex(nil) // pointer
+ temp.BeHooks = &backendHooks{lg: cfg.Logger, indexer: temp.CI}
+ temp.BE = openBackend(cfg, temp.BeHooks)
+ temp.CI.SetBackend(temp.BE)
+ cindex.CreateMetaBucket(temp.BE.BatchTx())
+
+ // on startup, decide whether the backend needs defragmentation
+ if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
+ err := maybeDefragBackend(cfg, temp.BE)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err != nil {
+ temp.BE.Close()
+ }
+ }()
+ // round tripper for peer-to-peer (server-side) communication
+ temp.Prt, err = rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
+ if err != nil {
+ return nil, err
+ }
+
+ switch {
+ case !haveWAL && !cfg.NewCluster: // no WAL and not a new cluster: a member joining an existing cluster
+ if err = cfg.VerifyJoinExisting(); err != nil {
+ return nil, err
+ }
+ temp.CL, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
+ if err != nil {
+ return nil, err
+ }
+ existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(temp.CL, cfg.Name), temp.Prt)
+ if gerr != nil {
+ return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
+ }
+ if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, temp.CL, existingCluster); err != nil {
+ return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
+ }
+ if !isCompatibleWithCluster(cfg.Logger, temp.CL, temp.CL.MemberByName(cfg.Name).ID, temp.Prt) {
+ return nil, fmt.Errorf("incompatible with current running cluster")
+ }
+
+ temp.Remotes = existingCluster.Members()
+ temp.CL.SetID(types.ID(0), existingCluster.ID())
+ temp.CL.SetStore(temp.ST)
+ temp.CL.SetBackend(temp.BE)
+ temp.ID, temp.N, temp.S, temp.W = startNode(cfg, temp.CL, nil)
+ temp.CL.SetID(temp.ID, existingCluster.ID())
+
+ case !haveWAL && cfg.NewCluster: // no WAL and a new cluster: bootstrapping a brand-new member
+ if err = cfg.VerifyBootstrap(); err != nil { // validate the peer URLs: --initial-advertise-peer-urls and --initial-cluster
+ return nil, err
+ }
+ // create the RaftCluster
+ temp.CL, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
+ if err != nil {
+ return nil, err
+ }
+ m := temp.CL.MemberByName(cfg.Name) // returns this node's member info
+ if isMemberBootstrapped(cfg.Logger, temp.CL, cfg.Name, temp.Prt, cfg.BootstrapTimeoutEffective()) {
+ return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
+ }
+ // TODO: whether to use discovery to find the other nodes
+ if cfg.ShouldDiscover() {
+ var str string
+ str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
+ if err != nil {
+ return nil, &DiscoveryError{Op: "join", Err: err}
+ }
+ var urlsmap types.URLsMap
+ urlsmap, err = types.NewURLsMap(str)
+ if err != nil {
+ return nil, err
+ }
+ if config.CheckDuplicateURL(urlsmap) {
+ return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
+ }
+ if temp.CL, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
+ return nil, err
+ }
+ }
+ temp.CL.SetStore(temp.ST)
+ temp.CL.SetBackend(temp.BE)
+ // start the node
+ temp.ID, temp.N, temp.S, temp.W = startNode(cfg, temp.CL, temp.CL.MemberIDs())
+ temp.CL.SetID(temp.ID, temp.CL.ID())
+
+ case haveWAL:
+ if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to member directory: %v", err)
+ }
+
+ if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
+ }
+
+ if cfg.ShouldDiscover() {
+ cfg.Logger.Warn(
+ "discovery token is ignored since cluster already initialized; valid logs are found",
+ zap.String("wal-dir", cfg.WALDir()),
+ )
+ }
+
+ // Find a snapshot to start/restart a raft node
+ walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
+ if err != nil {
+ return nil, err
+ }
+ // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
+ // wal log entries
+ temp.Snapshot, err = temp.SS.LoadNewestAvailable(walSnaps)
+ if err != nil && err != snap.ErrNoSnapshot {
+ return nil, err
+ }
+
+ if temp.Snapshot != nil {
+ if err = temp.ST.Recovery(temp.Snapshot.Data); err != nil {
+ cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
+ }
+
+ if err = assertNoV2StoreContent(cfg.Logger, temp.ST, cfg.V2Deprecation); err != nil {
+ cfg.Logger.Error("illegal v2store content", zap.Error(err))
+ return nil, err
+ }
+
+ cfg.Logger.Info(
+ "recovered v2 store from snapshot",
+ zap.Uint64("snapshot-index", temp.Snapshot.Metadata.Index),
+ zap.String("snapshot-size", humanize.Bytes(uint64(temp.Snapshot.Size()))),
+ )
+
+ if temp.BE, err = recoverSnapshotBackend(cfg, temp.BE, *temp.Snapshot, temp.BeExist, temp.BeHooks); err != nil {
+ cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
+ }
+ // A snapshot db may have already been recovered, and the old db should have
+ // already been closed in this case, so we should set the backend again.
+ temp.CI.SetBackend(temp.BE)
+ s1, s2 := temp.BE.Size(), temp.BE.SizeInUse()
+ cfg.Logger.Info(
+ "recovered v3 backend from snapshot",
+ zap.Int64("backend-size-bytes", s1),
+ zap.String("backend-size", humanize.Bytes(uint64(s1))),
+ zap.Int64("backend-size-in-use-bytes", s2),
+ zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
+ )
+ } else {
+ cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
+ }
+
+ if !cfg.ForceNewCluster {
+ temp.ID, temp.CL, temp.N, temp.S, temp.W = restartNode(cfg, temp.Snapshot)
+ } else {
+ temp.ID, temp.CL, temp.N, temp.S, temp.W = restartAsStandaloneNode(cfg, temp.Snapshot)
+ }
+
+ temp.CL.SetStore(temp.ST)
+ temp.CL.SetBackend(temp.BE)
+ temp.CL.Recover(api.UpdateCapability)
+ if temp.CL.Version() != nil && !temp.CL.Version().LessThan(semver.Version{Major: 3}) && !temp.BeExist {
+ os.RemoveAll(temp.Bepath)
+ return nil, fmt.Errorf("database file (%v) of the backend is missing", temp.Bepath)
+ }
+
+ default:
+ return nil, fmt.Errorf("不支持的引导配置")
+ }
+
+ if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
+ return nil, fmt.Errorf("不能访问成员目录: %v", terr)
+ }
+
+ return
+}
+
+// NewServer creates a new EtcdServer from the supplied configuration. The configuration is considered static for the lifetime of the EtcdServer.
+func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
+ temp := &Temp{}
+ temp, err = MySelfStartRaft(cfg) // 逻辑时钟初始化
+ serverStats := stats.NewServerStats(cfg.Name, temp.ID.String())
+ leaderStats := stats.NewLeaderStats(cfg.Logger, temp.ID.String())
+
+ heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
+ srv = &EtcdServer{
+ readych: make(chan struct{}),
+ Cfg: cfg,
+ lgMu: new(sync.RWMutex),
+ lg: cfg.Logger,
+ errorc: make(chan error, 1),
+ v2store: temp.ST,
+ snapshotter: temp.SS,
+ r: *newRaftNode(
+ raftNodeConfig{
+ lg: cfg.Logger,
+ isIDRemoved: func(id uint64) bool { return temp.CL.IsIDRemoved(types.ID(id)) },
+ RaftNodeInterFace: temp.N,
+ heartbeat: heartbeat,
+ raftStorage: temp.S,
+ storage: NewStorage(temp.W, temp.SS),
+ },
+ ),
+ id: temp.ID,
+ attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
+ cluster: temp.CL,
+ stats: serverStats,
+ lstats: leaderStats,
+ SyncTicker: time.NewTicker(500 * time.Millisecond),
+ peerRt: temp.Prt,
+ reqIDGen: idutil.NewGenerator(uint16(temp.ID), time.Now()),
+ AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
+ consistIndex: temp.CI,
+ firstCommitInTermC: make(chan struct{}),
+ }
+ srv.applyV2 = NewApplierV2(cfg.Logger, srv.v2store, srv.cluster)
+
+ srv.backend = temp.BE
+ srv.beHooks = temp.BeHooks
+ // Likely to make sure leases do not expire while a leader election is in progress:
+ // the minimum TTL should be longer than the election timeout.
+ minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
+ // with the defaults this works out to roughly 2s
+
+ // Always recover the lessor before the KV. When mvcc.KV is recovered it reattaches keys to their leases.
+ // If mvcc.KV were recovered first, it would attach the keys to the wrong lessor before the lessor recovers.
+ srv.lessor = lease.NewLessor(srv.Logger(), srv.backend, srv.cluster, lease.LessorConfig{
+ MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())),
+ CheckpointInterval: cfg.LeaseCheckpointInterval,
+ CheckpointPersist: cfg.LeaseCheckpointPersist,
+ ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
+ })
+
+ tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken, // token format: simple or jwt
+ func(index uint64) <-chan struct{} {
+ return srv.applyWait.Wait(index)
+ },
+ time.Duration(cfg.TokenTTL)*time.Second,
+ )
+ if err != nil {
+ cfg.Logger.Warn("创建令牌提供程序失败", zap.Error(err))
+ return nil, err
+ }
+ // watch | kv ...
+ srv.kv = mvcc.New(srv.Logger(), srv.backend, srv.lessor, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
+
+ kvindex := temp.CI.ConsistentIndex()
+ srv.lg.Debug("恢复consistentIndex", zap.Uint64("index", kvindex))
+ if temp.BeExist {
+ // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
+ // etcd from pre-3.0 release.
+ if temp.Snapshot != nil && kvindex < temp.Snapshot.Metadata.Index {
+ if kvindex != 0 {
+ return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", temp.Bepath, kvindex, temp.Snapshot.Metadata.Index)
+ }
+ cfg.Logger.Warn(
+ "consistent index was never saved",
+ zap.Uint64("snapshot-index", temp.Snapshot.Metadata.Index),
+ )
+ }
+ }
+
+ srv.authStore = auth.NewAuthStore(srv.Logger(), srv.backend, tp, int(cfg.BcryptCost)) // BcryptCost sets the cost/strength of the bcrypt hashing of auth passwords; default 10
+
+ newSrv := srv // since srv == nil in defer if srv is returned as nil
+ defer func() {
+ // closing backend without first closing kv can cause
+ // resumed compactions to fail with closed tx errors
+ if err != nil {
+ newSrv.kv.Close()
+ }
+ }()
+ if num := cfg.AutoCompactionRetention; num != 0 {
+ srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv)
+ if err != nil {
+ return nil, err
+ }
+ srv.compactor.Run()
+ }
+
+ srv.applyV3Base = srv.newApplierV3Backend()
+ srv.applyV3Internal = srv.newApplierV3Internal()
+ // restore alarms at startup
+ if err = srv.restoreAlarms(); err != nil {
+ return nil, err
+ }
+
+ if srv.Cfg.EnableLeaseCheckpoint {
+ // Enable lease checkpointing by setting the checkpointer.
+ srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
+ // Periodically, in batches, the remaining TTL of each lease is replicated to the followers via the raft log;
+ // when a follower receives the checkpoint request it updates the remaining TTL in its in-memory LeaseMap.
+ srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
+ })
+ }
+
+ // TODO: move transport initialization near the definition of remote
+ tr := &rafthttp.Transport{
+ Logger: cfg.Logger,
+ TLSInfo: cfg.PeerTLSInfo,
+ DialTimeout: cfg.PeerDialTimeout(),
+ ID: temp.ID,
+ URLs: cfg.PeerURLs,
+ ClusterID: temp.CL.ID(),
+ Raft: srv,
+ Snapshotter: temp.SS,
+ ServerStats: serverStats,
+ LeaderStats: leaderStats,
+ ErrorC: srv.errorc,
+ }
+ if err = tr.Start(); err != nil {
+ return nil, err
+ }
+ // add all remotes into transport
+ for _, m := range temp.Remotes {
+ if m.ID != temp.ID {
+ tr.AddRemote(m.ID, m.PeerURLs)
+ }
+ }
+ for _, m := range temp.CL.Members() {
+ if m.ID != temp.ID {
+ tr.AddPeer(m.ID, m.PeerURLs)
+ }
+ }
+ srv.r.transport = tr
+
+ return srv, nil
+}
+
+// assertNoV2StoreContent -> depending on the deprecation stage, warns or reports an error
+// if the v2store contains custom content.
+func assertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error {
+ metaOnly, err := membership.IsMetaStoreOnly(st)
+ if err != nil {
+ return err
+ }
+ if metaOnly {
+ return nil
+ }
+ if deprecationStage.IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) {
+ return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage)
+ }
+ lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. Please remove the content.")
+ return nil
+}
+
+func (s *EtcdServer) Logger() *zap.Logger {
+ s.lgMu.RLock()
+ l := s.lg
+ s.lgMu.RUnlock()
+ return l
+}
+
+func tickToDur(ticks int, tickMs uint) string {
+ return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond)
+}
+
+func (s *EtcdServer) adjustTicks() {
+ lg := s.Logger()
+ clusterN := len(s.cluster.Members())
+
+ // single-node fresh start, or single-node recovers from snapshot
+ if clusterN == 1 {
+ ticks := s.Cfg.ElectionTicks - 1
+ lg.Info(
+ "started as single-node; fast-forwarding election ticks",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int("forward-ticks", ticks),
+ zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
+ zap.Int("election-ticks", s.Cfg.ElectionTicks),
+ zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
+ )
+ s.r.advanceTicks(ticks)
+ return
+ }
+
+ if !s.Cfg.InitialElectionTickAdvance {
+ lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
+ return
+ }
+ lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
+
+ // retry up to "rafthttp.ConnReadTimeout", which is 5-sec
+ // until peer connection reports; otherwise:
+ // 1. all connections failed, or
+ // 2. no active peers, or
+ // 3. restarted single-node with no snapshot
+ // then, do nothing, because advancing ticks would have no effect
+ waitTime := rafthttp.ConnReadTimeout
+ itv := 50 * time.Millisecond
+ for i := int64(0); i < int64(waitTime/itv); i++ {
+ select {
+ case <-time.After(itv):
+ case <-s.stopping:
+ return
+ }
+
+ peerN := s.r.transport.ActivePeers()
+ if peerN > 1 {
+ // multi-node received peer connection reports
+ // adjust ticks, in case slow leader message receive
+ ticks := s.Cfg.ElectionTicks - 2
+
+ lg.Info(
+ "initialized peer connections; fast-forwarding election ticks",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int("forward-ticks", ticks),
+ zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
+ zap.Int("election-ticks", s.Cfg.ElectionTicks),
+ zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
+ zap.Int("active-remote-members", peerN),
+ )
+
+ s.r.advanceTicks(ticks)
+ return
+ }
+ }
+}
+
+func (s *EtcdServer) Start() {
+ s.start()
+ s.GoAttach(func() { s.adjustTicks() })
+ s.GoAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
+ s.GoAttach(s.purgeFile)
+ s.GoAttach(s.monitorVersions)
+ s.GoAttach(s.linearizableReadLoop)
+ s.GoAttach(s.monitorKVHash)
+ s.GoAttach(s.monitorDowngrade)
+}
+
+func (s *EtcdServer) start() {
+ lg := s.Logger()
+
+ if s.Cfg.SnapshotCount == 0 { // number of applied entries that triggers a snapshot to disk
+ lg.Info("updating snapshot count to the default",
+ zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount),
+ zap.Uint64("updated-snapshot-count", DefaultSnapshotCount),
+ )
+ s.Cfg.SnapshotCount = DefaultSnapshotCount
+ }
+ if s.Cfg.SnapshotCatchUpEntries == 0 {
+ lg.Info("将快照追赶条目更新为默认条目",
+ zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries),
+ zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries),
+ )
+ s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries
+ }
+
+ s.w = wait.New()
+ s.applyWait = wait.NewTimeList()
+ s.done = make(chan struct{})
+ s.stop = make(chan struct{})
+ s.stopping = make(chan struct{}, 1)
+ s.ctx, s.cancel = context.WithCancel(context.Background())
+ s.readwaitc = make(chan struct{}, 1)
+ s.readNotifier = newNotifier()
+ s.leaderChanged = make(chan struct{})
+ if s.ClusterVersion() != nil {
+ lg.Info("启动etcd", zap.String("local-member-id", s.ID().String()),
+ zap.String("local-etcd-version", version.Version),
+ zap.String("cluster-id", s.Cluster().ID().String()),
+ zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
+ )
+ } else {
+ lg.Info("启动etcd", zap.String("local-member-id", s.ID().String()),
+ zap.String("local-etcd-version", version.Version), zap.String("cluster-version", "to_be_decided"))
+ }
+
+ go s.run()
+}
+
+func (s *EtcdServer) purgeFile() {
+ lg := s.Logger()
+ var dberrc, serrc, werrc <-chan error
+ var dbdonec, sdonec, wdonec <-chan struct{}
+ if s.Cfg.MaxSnapFiles > 0 {
+ dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ sdonec, serrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ }
+ if s.Cfg.MaxWALFiles > 0 {
+ wdonec, werrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping)
+ }
+
+ select {
+ case e := <-dberrc:
+ lg.Fatal("failed to purge snap db file", zap.Error(e))
+ case e := <-serrc:
+ lg.Fatal("failed to purge snap file", zap.Error(e))
+ case e := <-werrc:
+ lg.Fatal("failed to purge wal file", zap.Error(e))
+ case <-s.stopping:
+ if dbdonec != nil {
+ <-dbdonec
+ }
+ if sdonec != nil {
+ <-sdonec
+ }
+ if wdonec != nil {
+ <-wdonec
+ }
+ return
+ }
+}
+
+type ServerPeer interface {
+ ServerV2
+ RaftHandler() http.Handler
+ LeaseHandler() http.Handler
+}
+
+func (s *EtcdServer) RaftHandler() http.Handler {
+ return s.r.transport.Handler()
+}
+
+type ServerPeerV2 interface {
+ ServerPeer
+ HashKVHandler() http.Handler
+ DowngradeEnabledHandler() http.Handler
+}
+
+func (s *EtcdServer) ReportUnreachable(id uint64) {
+ s.r.ReportUnreachable(id)
+}
+
+// ReportSnapshot reports snapshot sent status to the raft state machine,
+// and clears the used snapshot from the snapshot store.
+func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
+ s.r.ReportSnapshot(id, status)
+}
+
+type etcdProgress struct {
+ confState raftpb.ConfState
+ snapi uint64
+ appliedt uint64
+ appliedi uint64
+}
+
+// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
+// and helps decouple state machine logic from Raft algorithms.
+// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
+type raftReadyHandler struct {
+ getLead func() (lead uint64)
+ updateLead func(lead uint64)
+ updateLeadership func(newLeader bool)
+ updateCommittedIndex func(uint64)
+}
+
+func (s *EtcdServer) run() {
+ lg := s.Logger()
+
+ sn, err := s.r.raftStorage.Snapshot()
+ if err != nil {
+ lg.Panic("从Raft存储获取快照失败", zap.Error(err))
+ }
+
+ // asynchronously accept apply packets, dispatch progress in-order
+ sched := schedule.NewFIFOScheduler()
+
+ var (
+ smu sync.RWMutex
+ syncC <-chan time.Time
+ )
+ setSyncC := func(ch <-chan time.Time) {
+ smu.Lock()
+ syncC = ch
+ smu.Unlock()
+ }
+ getSyncC := func() (ch <-chan time.Time) {
+ smu.RLock()
+ ch = syncC
+ smu.RUnlock()
+ return
+ }
+ rh := &raftReadyHandler{
+ getLead: func() (lead uint64) { return s.getLead() },
+ updateLead: func(lead uint64) { s.setLead(lead) },
+ updateLeadership: func(newLeader bool) {
+ if !s.isLeader() {
+ // this node is no longer the leader
+ if s.lessor != nil {
+ s.lessor.Demote() // persist all leases and stop acting as the primary lessor
+ }
+ if s.compactor != nil {
+ s.compactor.Pause()
+ }
+ setSyncC(nil)
+ } else {
+ if newLeader {
+ t := time.Now()
+ s.leadTimeMu.Lock()
+ s.leadElectedTime = t
+ s.leadTimeMu.Unlock()
+ }
+ setSyncC(s.SyncTicker.C)
+ if s.compactor != nil {
+ s.compactor.Resume()
+ }
+ }
+ if newLeader {
+ s.leaderChangedMu.Lock()
+ lc := s.leaderChanged
+ s.leaderChanged = make(chan struct{})
+ close(lc)
+ s.leaderChangedMu.Unlock()
+ }
+ // TODO: remove the nil checking
+ // current test utility does not provide the stats
+ if s.stats != nil {
+ s.stats.BecomeLeader()
+ }
+ },
+ updateCommittedIndex: func(ci uint64) {
+ cci := s.getCommittedIndex()
+ if ci > cci {
+ s.setCommittedIndex(ci)
+ }
+ },
+ }
+ s.r.start(rh)
+
+ ep := etcdProgress{
+ confState: sn.Metadata.ConfState,
+ snapi: sn.Metadata.Index,
+ appliedt: sn.Metadata.Term,
+ appliedi: sn.Metadata.Index,
+ }
+
+ defer func() {
+ s.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping
+ close(s.stopping)
+ s.wgMu.Unlock()
+ s.cancel()
+ sched.Stop()
+
+ // wait for gouroutines before closing raft so wal stays open
+ s.wg.Wait()
+
+ s.SyncTicker.Stop()
+
+ // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
+ // by adding a peer after raft stops the transport
+ s.r.stop()
+
+ s.Cleanup()
+
+ close(s.done)
+ }()
+ var expiredLeaseC <-chan []*lease.Lease // channel that receives expired leases
+ if s.lessor != nil { // v3 only; the lessor implements lease expiry
+ expiredLeaseC = s.lessor.ExpiredLeasesC()
+ }
+
+ for {
+ select {
+ case ap := <-s.r.apply():
+ // On cluster startup, the first entries applied look like this:
+ // index1:EntryConfChange {"Type":0,"NodeID":10276657743932975437,"Context":"{\"id\":10276657743932975437,\"peerURLs\":[\"http://localhost:2380\"],\"name\":\"default\"}","ID":0}
+ // index2:EntryNormal nil, used for the first commit of the term
+ // index3:EntryNormal {"ID":7587861549007417858,"Method":"PUT","Path":"/0/members/8e9e05c52164694d/attributes","Val":"{\"name\":\"default\",\"clientURLs\":[\"http://localhost:2379\"]}","Dir":false,"PrevValue":"","PrevIndex":0,"Expiration":0,"Wait":false,"Since":0,"Recursive":false,"Sorted":false,"Quorum":false,"Time":0,"Stream":false}
+ // read the apply requests placed on applyc
+ f := func(context.Context) {
+ s.applyAll(&ep, &ap)
+ }
+ sched.Schedule(f)
+ case leases := <-expiredLeaseC:
+ s.GoAttach(func() {
+ // increase the throughput of expired-lease revocation through parallelism
+ c := make(chan struct{}, maxPendingRevokes) // bounds the concurrency of each batch to 16
+ for _, lease := range leases {
+ select {
+ case c <- struct{}{}:
+ case <-s.stopping:
+ return
+ }
+ lid := lease.ID
+ s.GoAttach(func() {
+ ctx := s.authStore.WithRoot(s.ctx)
+ _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
+ if lerr != nil {
+ lg.Warn("failed to revoke lease", zap.String("lease-id", fmt.Sprintf("%016x", lid)), zap.Error(lerr))
+ }
+ <-c
+ })
+ }
+ })
+ case err := <-s.errorc:
+ lg.Warn("etcd error", zap.Error(err))
+ lg.Warn("本机使用的data-dir必须移除")
+ return
+ case <-getSyncC():
+ if s.v2store.HasTTLKeys() {
+ s.sync(s.Cfg.ReqTimeout())
+ }
+ case <-s.stop:
+ return
+ }
+ }
+}
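+
+// NOTE: illustrative sketch only, not part of the original code. The expired-lease
+// branch above bounds concurrency with a buffered channel used as a semaphore
+// (capacity maxPendingRevokes = 16); stripped of the etcd specifics, the pattern
+// looks like this (revoke and leases are placeholders):
+//
+//	sem := make(chan struct{}, 16)
+//	for _, l := range leases {
+//		sem <- struct{}{} // blocks once 16 revocations are in flight
+//		go func(id int64) {
+//			defer func() { <-sem }() // release the slot
+//			revoke(id)
+//		}(l.ID)
+//	}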
+
+// Cleanup removes allocated objects by EtcdServer.NewServer in
+// situation that EtcdServer::Start was not called (that takes care of cleanup).
+func (s *EtcdServer) Cleanup() {
+ // kv, lessor and backend can be nil if running without v3 enabled
+ // or running unit tests.
+ if s.lessor != nil {
+ s.lessor.Stop()
+ }
+ if s.kv != nil {
+ s.kv.Close()
+ }
+ if s.authStore != nil {
+ s.authStore.Close()
+ }
+ if s.backend != nil {
+ s.backend.Close()
+ }
+ if s.compactor != nil {
+ s.compactor.Stop()
+ }
+}
+
+func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
+ s.applySnapshot(ep, apply) // apply the incoming snapshot, if any
+ s.applyEntries(ep, apply)
+
+ s.applyWait.Trigger(ep.appliedi)
+
+ // wait for the raft routine to finish the disk writes before triggering a
+ // snapshot, or the applied index might be greater than the last index in raft
+ // storage, since the raft routine might be slower than the apply routine.
+ <-apply.notifyc
+
+ s.triggerSnapshot(ep)
+ select {
+ // snapshot requested via send()
+ case m := <-s.r.msgSnapC:
+ merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
+ s.sendMergedSnap(merged)
+ default:
+ }
+}
+
+func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
+ if raft.IsEmptySnap(apply.snapshot) {
+ return
+ }
+
+ lg := s.Logger()
+ lg.Info("开始应用快照",
+ zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ )
+ defer func() {
+ lg.Info("已应用快照",
+ zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ )
+ }()
+
+ if apply.snapshot.Metadata.Index <= ep.appliedi {
+ lg.Panic("意外得到 来自过时索引的领导者快照",
+ zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ )
+ }
+
+ // wait for raftNode to persist the snapshot onto disk
+ <-apply.notifyc
+
+ newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot, s.beHooks)
+ if err != nil {
+ lg.Panic("failed to open snapshot backend", zap.Error(err))
+ }
+
+ // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+ // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+ if s.lessor != nil {
+ lg.Info("restoring lease store")
+
+ s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write(traceutil.TODO()) })
+
+ lg.Info("restored lease store")
+ }
+
+ lg.Info("restoring mvcc store")
+
+ if err := s.kv.Restore(newbe); err != nil {
+ lg.Panic("failed to restore mvcc store", zap.Error(err))
+ }
+
+ s.consistIndex.SetBackend(newbe)
+ lg.Info("restored mvcc store", zap.Uint64("consistent-index", s.consistIndex.ConsistentIndex()))
+
+ // Closing old backend might block until all the txns
+ // on the backend are finished.
+ // We do not want to wait on closing the old backend.
+ s.backendLock.Lock()
+ oldbe := s.backend
+ go func() {
+ lg.Info("closing old backend file")
+ defer func() {
+ lg.Info("closed old backend file")
+ }()
+ if err := oldbe.Close(); err != nil {
+ lg.Panic("failed to close old backend", zap.Error(err))
+ }
+ }()
+
+ s.backend = newbe
+ s.backendLock.Unlock()
+
+ lg.Info("restoring alarm store")
+
+ if err := s.restoreAlarms(); err != nil {
+ lg.Panic("failed to restore alarm store", zap.Error(err))
+ }
+
+ lg.Info("restored alarm store")
+
+ if s.authStore != nil {
+ lg.Info("restoring auth store")
+
+ s.authStore.Recover(newbe)
+
+ lg.Info("restored auth store")
+ }
+
+ lg.Info("restoring v2 store")
+ if err := s.v2store.Recovery(apply.snapshot.Data); err != nil {
+ lg.Panic("failed to restore v2 store", zap.Error(err))
+ }
+
+ if err := assertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil {
+ lg.Panic("illegal v2store content", zap.Error(err))
+ }
+
+ lg.Info("restored v2 store")
+
+ s.cluster.SetBackend(newbe)
+
+ lg.Info("restoring cluster configuration")
+
+ s.cluster.Recover(api.UpdateCapability)
+
+ lg.Info("restored cluster configuration")
+ lg.Info("removing old peers from network")
+
+ // recover raft transport
+ s.r.transport.RemoveAllPeers()
+
+ lg.Info("removed old peers from network")
+ lg.Info("adding peers from new cluster configuration")
+
+ for _, m := range s.cluster.Members() {
+ if m.ID == s.ID() {
+ continue
+ }
+ s.r.transport.AddPeer(m.ID, m.PeerURLs)
+ }
+
+ lg.Info("added peers from new cluster configuration")
+
+ ep.appliedt = apply.snapshot.Metadata.Term
+ ep.appliedi = apply.snapshot.Metadata.Index
+ ep.snapi = ep.appliedi
+ ep.confState = apply.snapshot.Metadata.ConfState
+}
+
+func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
+ if len(apply.entries) == 0 {
+ return
+ }
+ firsti := apply.entries[0].Index
+ if firsti > ep.appliedi+1 {
+ lg := s.Logger()
+ lg.Panic("意外的 已提交索引",
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("first-committed-entry-index", firsti),
+ )
+ }
+ var ents []raftpb.Entry
+ if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
+ ents = apply.entries[ep.appliedi+1-firsti:]
+ }
+ if len(ents) == 0 {
+ return
+ }
+ var shouldstop bool
+ if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
+ go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
+ }
+}
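+
+ // Illustrative example of the slicing above (hypothetical numbers): if ep.appliedi = 10
+ // and the batch contains entries 8..12 (firsti = 8), then ep.appliedi+1-firsti = 3, so
+ // ents = apply.entries[3:] keeps only the entries at index 11 and 12; entries 8..10 were
+ // already applied and are skipped.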
+
+func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
+ if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount { // number of applied entries required to trigger a disk snapshot
+ return
+ }
+
+ lg := s.Logger()
+ lg.Info("触发打快照",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Uint64("local-member-applied-index", ep.appliedi),
+ zap.Uint64("local-member-snapshot-index", ep.snapi),
+ zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
+ )
+
+ s.snapshot(ep.appliedi, ep.confState)
+ ep.snapi = ep.appliedi
+}
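+
+ // Illustrative example (assuming the default --snapshot-count of 100000): with
+ // ep.snapi = 200000, a snapshot is only triggered once ep.appliedi exceeds 300000;
+ // afterwards ep.snapi is advanced to the new applied index.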
+
+func (s *EtcdServer) hasMultipleVotingMembers() bool {
+ return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1
+}
+
+func (s *EtcdServer) isLeader() bool {
+ return uint64(s.ID()) == s.Lead()
+}
+
+ // MoveLeader transfers leadership to the given transferee.
+func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
+ if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner {
+ return ErrBadLeaderTransferee
+ }
+
+ now := time.Now()
+ interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
+
+ lg := s.Logger()
+ lg.Info(
+ "开始leader转移",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("current-leader-member-id", types.ID(lead).String()),
+ zap.String("transferee-member-id", types.ID(transferee).String()),
+ )
+
+ s.r.TransferLeadership(ctx, lead, transferee) // start the leadership transfer
+ for s.Lead() != transferee {
+ select {
+ case <-ctx.Done(): // time out
+ return ErrTimeoutLeaderTransfer
+ case <-time.After(interval):
+ }
+ }
+
+ // drain all requests, or drop all messages destined for the old leader
+ lg.Info(
+ "leadership transfer finished",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("old-leader-member-id", types.ID(lead).String()),
+ zap.String("new-leader-member-id", types.ID(transferee).String()),
+ zap.Duration("took", time.Since(now)),
+ )
+ return nil
+}
+
+func (s *EtcdServer) TransferLeadership() error {
+ lg := s.Logger()
+ if !s.isLeader() {
+ lg.Info(
+ "skipped leadership transfer; local etcd is not leader",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
+ )
+ return nil
+ }
+
+ if !s.hasMultipleVotingMembers() {
+ lg.Info(
+ "skipped leadership transfer for single voting member cluster",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
+ )
+ return nil
+ }
+
+ transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs())
+ if !ok {
+ return ErrUnhealthy
+ }
+
+ tm := s.Cfg.ReqTimeout()
+ ctx, cancel := context.WithTimeout(s.ctx, tm)
+ err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
+ cancel()
+ return err
+}
+
+ // HardStop stops the server without coordinating with other members in the cluster.
+func (s *EtcdServer) HardStop() {
+ select {
+ case s.stop <- struct{}{}:
+ case <-s.done:
+ return
+ }
+ <-s.done
+}
+
+ // Stop gracefully stops this member; if it is the leader, it first waits for leadership to be transferred.
+func (s *EtcdServer) Stop() {
+ lg := s.Logger()
+ if err := s.TransferLeadership(); err != nil {
+ lg.Warn("leader转移失败", zap.String("local-member-id", s.ID().String()), zap.Error(err))
+ }
+ s.HardStop()
+}
+
+ // ReadyNotify returns a channel that is closed when the server is ready to serve client requests.
+func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
+
+func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
+ select {
+ case <-time.After(d):
+ case <-s.done:
+ }
+ select {
+ case s.errorc <- err:
+ default:
+ }
+}
+
+ // StopNotify returns a channel that receives an empty struct
+ // when the etcd server is stopped.
+func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
+
+ // StoppingNotify returns a channel that receives an empty struct
+ // when the etcd server is being stopped.
+func (s *EtcdServer) StoppingNotify() <-chan struct{} { return s.stopping }
+
+func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
+
+func (s *EtcdServer) LeaderStats() []byte {
+ lead := s.getLead()
+ if lead != uint64(s.id) {
+ return nil
+ }
+ return s.lstats.JSON()
+}
+
+func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() }
+
+ // checkMembershipOperationPermission checks permission for membership operations.
+func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
+ _ = auth.NewAuthStore
+ if s.authStore == nil {
+ // In a normal etcd process, s.authStore is never nil. This branch handles cases in server_test.go.
+ return nil
+ }
+
+ // Note: this permission check is done at the API layer, so a TOCTOU problem can arise with a timeline such as:
+ // update member A's attributes -> revoke A's root role -> apply the membership change at the state machine layer.
+ // However, both membership changes and role management require root privileges, so careful operation by administrators prevents the problem.
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+
+ return s.AuthStore().IsAdminPermitted(authInfo)
+}
+
+ // isLearnerReady checks whether the learner has caught up with the leader.
+ // Note: it returns nil if the member is not found in the cluster or if the member is not a learner.
+ // Both conditions are checked again later, before the apply phase.
+func (s *EtcdServer) isLearnerReady(id uint64) error {
+ rs := s.raftStatus()
+ if rs.Progress == nil {
+ return ErrNotLeader
+ }
+
+ var learnerMatch uint64
+ isFound := false
+ leaderID := rs.ID
+ for memberID, progress := range rs.Progress {
+ if id == memberID {
+ learnerMatch = progress.Match
+ isFound = true
+ break
+ }
+ }
+
+ if isFound {
+ leaderMatch := rs.Progress[leaderID].Match
+ // the learner's progress has not caught up with the leader yet
+ if float64(learnerMatch) < float64(leaderMatch)*readyPercent {
+ return ErrLearnerNotReady
+ }
+ }
+ return nil
+}
+
+func (s *EtcdServer) mayRemoveMember(id types.ID) error {
+ if !s.Cfg.StrictReconfigCheck { // strict reconfiguration check disabled
+ return nil
+ }
+
+ lg := s.Logger()
+ isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner
+ // no need to check quorum when removing non-voting member
+ if isLearner {
+ return nil
+ }
+
+ if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) {
+ lg.Warn(
+ "rejecting member remove request; not enough healthy members",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-remove-id", id.String()),
+ zap.Error(ErrNotEnoughStartedMembers),
+ )
+ return ErrNotEnoughStartedMembers
+ }
+
+ // downed member is safe to remove since it's not part of the active quorum
+ if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
+ return nil
+ }
+
+ // protect quorum if some members are down
+ m := s.cluster.VotingMembers()
+ active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
+ if (active - 1) < 1+((len(m)-1)/2) {
+ lg.Warn(
+ "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-remove", id.String()),
+ zap.Int("active-peers", active),
+ zap.Error(ErrUnhealthy),
+ )
+ return ErrUnhealthy
+ }
+
+ return nil
+}
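+
+ // Illustrative quorum arithmetic for the check above (hypothetical numbers): with 5
+ // voting members (len(m) = 5) and 4 of them active, removing one healthy member leaves
+ // active-1 = 3, which still meets 1+((5-1)/2) = 3, so the removal is allowed; with only
+ // 3 active members the request would be rejected with ErrUnhealthy.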
+
+ // FirstCommitInTermNotify returns a channel that is signaled on the first commit of the current term,
+ // which is required for a new leader to answer read-only requests.
+ // The leader must not respond to any linearizable read-only request before that commit.
+func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} {
+ s.firstCommitInTermMu.RLock()
+ defer s.firstCommitInTermMu.RUnlock()
+ return s.firstCommitInTermC
+}
+
+type confChangeResponse struct {
+ membs []*membership.Member
+ err error
+}
+
+ // configureAndSendRaft sends a configuration change through raft and waits for it to be applied to the server. It blocks until the change is performed or an error occurs.
+func (s *EtcdServer) configureAndSendRaft(ctx context.Context, cc raftpb.ConfChangeV1) ([]*membership.Member, error) {
+ lg := s.Logger()
+ cc.ID = s.reqIDGen.Next()
+ ch := s.w.Register(cc.ID)
+
+ start := time.Now()
+ if err := s.r.ProposeConfChange(ctx, cc); err != nil {
+ s.w.Trigger(cc.ID, nil)
+ return nil, err
+ }
+
+ select {
+ case x := <-ch:
+ if x == nil {
+ lg.Panic("配置失败")
+ }
+ resp := x.(*confChangeResponse)
+ lg.Info(
+ "通过raft应用配置更改",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("raft-conf-change", cc.Type.String()),
+ zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
+ )
+ return resp.membs, resp.err
+
+ case <-ctx.Done():
+ s.w.Trigger(cc.ID, nil) // GC wait
+ return nil, s.parseProposeCtxErr(ctx.Err(), start)
+
+ case <-s.stopping:
+ return nil, ErrStopped
+ }
+}
+
+// sync proposes a SYNC request and is non-blocking.
+ // This makes no guarantee that the request will be proposed or performed.
+ // The request will be canceled after the given timeout.
+func (s *EtcdServer) sync(timeout time.Duration) {
+ req := pb.Request{
+ Method: "SYNC",
+ ID: s.reqIDGen.Next(),
+ Time: time.Now().UnixNano(),
+ }
+ data := pbutil.MustMarshal(&req)
+ // There is no promise that node has leader when do SYNC request,
+ // so it uses goroutine to propose.
+ ctx, cancel := context.WithTimeout(s.ctx, timeout)
+ s.GoAttach(func() {
+ s.r.Propose(ctx, data)
+ cancel()
+ })
+}
+
+// publish registers etcd information into the cluster. The information
+// is the JSON representation of this etcd's member struct, updated with the
+// static clientURLs of the etcd.
+// The function keeps attempting to register until it succeeds,
+// or its etcd is stopped.
+//
+// Use v2 store to encode member attributes, and apply through Raft
+// but does not go through v2 API endpoint, which means even with v2
+// client handler disabled (e.g. --enable-v2=false), cluster can still
+// process publish requests through rafthttp
+// TODO: Remove in 3.6 (start using publishV3)
+func (s *EtcdServer) publish(timeout time.Duration) {
+ lg := s.Logger()
+ b, err := json.Marshal(s.attributes)
+ if err != nil {
+ lg.Panic("failed to marshal JSON", zap.Error(err))
+ return
+ }
+ req := pb.Request{
+ Method: "PUT",
+ Path: membership.MemberAttributesStorePath(s.id),
+ Val: string(b),
+ }
+
+ for {
+ ctx, cancel := context.WithTimeout(s.ctx, timeout)
+ _, err := s.Do(ctx, req)
+ cancel()
+ switch err {
+ case nil:
+ close(s.readych)
+ lg.Info(
+ "published local member to cluster through raft",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.String("request-path", req.Path),
+ zap.String("cluster-id", s.cluster.ID().String()),
+ zap.Duration("publish-timeout", timeout),
+ )
+ return
+
+ case ErrStopped:
+ lg.Warn(
+ "stopped publish because etcd is stopped",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.Duration("publish-timeout", timeout),
+ zap.Error(err),
+ )
+ return
+
+ default:
+ lg.Warn(
+ "通过raft发布本机信息失败",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.String("request-path", req.Path),
+ zap.Duration("publish-timeout", timeout),
+ zap.Error(err),
+ )
+ }
+ }
+}
+
+func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
+ atomic.AddInt64(&s.inflightSnapshots, 1)
+
+ lg := s.Logger()
+ fields := []zap.Field{
+ zap.String("from", s.ID().String()),
+ zap.String("to", types.ID(merged.To).String()),
+ zap.Int64("bytes", merged.TotalSize),
+ zap.String("size", humanize.Bytes(uint64(merged.TotalSize))),
+ }
+
+ now := time.Now()
+ s.r.transport.SendSnapshot(merged)
+ lg.Info("sending merged snapshot", fields...)
+
+ s.GoAttach(func() {
+ select {
+ case ok := <-merged.CloseNotify():
+ // delay releasing inflight snapshot for another 30 seconds to
+ // block log compaction.
+ // If the follower still fails to catch up, it is probably just too slow
+ // to catch up. We cannot avoid the snapshot cycle anyway.
+ if ok {
+ select {
+ case <-time.After(releaseDelayAfterSnapshot):
+ case <-s.stopping:
+ }
+ }
+
+ atomic.AddInt64(&s.inflightSnapshots, -1)
+
+ lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...)
+
+ case <-s.stopping:
+ lg.Warn("canceled sending merged snapshot; etcd stopping", fields...)
+ return
+ }
+ })
+}
+
+ // apply applies entries committed by raft to the server's state machine.
+func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
+ // confState holds the cluster configuration from the current snapshot
+ s.lg.Debug("applying entries", zap.Int("num-entries", len(es)))
+ for i := range es {
+ e := es[i]
+ s.lg.Debug("开始应用日志", zap.Uint64("index", e.Index), zap.Uint64("term", e.Term), zap.Stringer("type", e.Type))
+ switch e.Type {
+ case raftpb.EntryNormal:
+ s.applyEntryNormal(&e)
+ s.setAppliedIndex(e.Index)
+ s.setTerm(e.Term)
+
+ case raftpb.EntryConfChange:
+ shouldApplyV3 := membership.ApplyV2storeOnly // false
+ if e.Index > s.consistIndex.ConsistentIndex() { // consistent_index and term as recorded in the bolt.db meta bucket
+ s.consistIndex.SetConsistentIndex(e.Index, e.Term) // update the in-memory value
+ shouldApplyV3 = membership.ApplyBoth // true
+ }
+
+ var cc raftpb.ConfChangeV1
+ _ = cc.Unmarshal
+ pbutil.MustUnmarshal(&cc, e.Data)
+ // ConfChangeAddNode {"id":10276657743932975437,"peerURLs":["http://localhost:2380"],"name":"default"}
+ removedSelf, err := s.applyConfChange(cc, confState, shouldApplyV3)
+ s.setAppliedIndex(e.Index)
+ s.setTerm(e.Term)
+ shouldStop = shouldStop || removedSelf
+ s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
+
+ default:
+ lg := s.Logger()
+ lg.Panic(
+ "未知的日志类型;必须是 EntryNormal 或 EntryConfChange",
+ zap.String("type", e.Type.String()),
+ )
+ }
+ appliedi, appliedt = e.Index, e.Term
+ }
+ return appliedt, appliedi, shouldStop
+}
+
+// TODO: non-blocking snapshot
+func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
+ clone := s.v2store.Clone()
+ // commit kv to write metadata (for example: consistent index) to disk.
+ //
+ // This guarantees that Backend's consistent_index is >= index of last snapshot.
+ //
+ // KV().commit() updates the consistent index in backend.
+ // All operations that update the consistent index must be called sequentially
+ // from applyAll function.
+ // So KV().Commit() cannot run in parallel with apply. It has to be called outside
+ // the go routine created below.
+ s.KV().Commit()
+
+ s.GoAttach(func() {
+ lg := s.Logger()
+
+ d, err := clone.SaveNoCopy()
+ // TODO: current store will never fail to do a snapshot
+ // what should we do if the store might fail?
+ if err != nil {
+ lg.Panic("failed to save v2 store", zap.Error(err))
+ }
+ snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
+ if err != nil {
+ // the snapshot was done asynchronously with the progress of raft.
+ // raft might have already got a newer snapshot.
+ if err == raft.ErrSnapOutOfDate {
+ return
+ }
+ lg.Panic("failed to create snapshot", zap.Error(err))
+ }
+ // SaveSnap saves the snapshot to file and appends the corresponding WAL entry.
+ if err = s.r.storage.SaveSnap(snap); err != nil {
+ lg.Panic("failed to save snapshot", zap.Error(err))
+ }
+ if err = s.r.storage.Release(snap); err != nil {
+ lg.Panic("failed to release wal", zap.Error(err))
+ }
+
+ lg.Info(
+ "saved snapshot",
+ zap.Uint64("snapshot-index", snap.Metadata.Index),
+ )
+
+ // When sending a snapshot, etcd will pause compaction.
+ // After receives a snapshot, the slow follower needs to get all the entries right after
+ // the snapshot sent to catch up. If we do not pause compaction, the log entries right after
+ // the snapshot sent might already be compacted. It happens when the snapshot takes a long time
+ // to send and save. Pausing compaction avoids triggering a snapshot sending cycle.
+ if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
+ lg.Info("skip compaction since there is an inflight snapshot")
+ return
+ }
+
+ // keep some in memory log entries for slow followers.
+ compacti := uint64(1)
+ if snapi > s.Cfg.SnapshotCatchUpEntries {
+ compacti = snapi - s.Cfg.SnapshotCatchUpEntries // keep some entries so slow followers can catch up
+ }
+
+ err = s.r.raftStorage.Compact(compacti)
+ if err != nil {
+ // the compaction was done asynchronously with the progress of raft.
+ // raft log might already been compact.
+ if err == raft.ErrCompacted {
+ return
+ }
+ lg.Panic("failed to compact", zap.Error(err))
+ }
+ lg.Info(
+ "compacted Raft logs",
+ zap.Uint64("compact-index", compacti),
+ )
+ })
+}
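+
+ // Illustrative compaction example (assuming the default SnapshotCatchUpEntries of 5000):
+ // after snapshotting at snapi = 100000, compacti = 95000, so entries above index 95000
+ // stay in the raft MemoryStorage and slow followers can still catch up without being
+ // sent a full snapshot.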
+
+// CutPeer drops messages to the specified peer.
+func (s *EtcdServer) CutPeer(id types.ID) {
+ tr, ok := s.r.transport.(*rafthttp.Transport)
+ if ok {
+ tr.CutPeer(id)
+ }
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (s *EtcdServer) MendPeer(id types.ID) {
+ tr, ok := s.r.transport.(*rafthttp.Transport)
+ if ok {
+ tr.MendPeer(id)
+ }
+}
+
+func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
+
+func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
+
+// monitorVersions checks the member's version every monitorVersionInterval.
+ // It updates the cluster version if all members agree on a higher one.
+ // It prints out a log if there is a member with a higher version than the
+// local version.
+func (s *EtcdServer) monitorVersions() {
+ for {
+ select {
+ case <-s.FirstCommitInTermNotify():
+ case <-time.After(monitorVersionInterval):
+ case <-s.stopping:
+ return
+ }
+
+ if s.Leader() != s.ID() {
+ continue
+ }
+
+ v := decideClusterVersion(s.Logger(), getVersions(s.Logger(), s.cluster, s.id, s.peerRt))
+ if v != nil {
+ // only keep major.minor version for comparison
+ v = &semver.Version{
+ Major: v.Major,
+ Minor: v.Minor,
+ }
+ }
+
+ // if the current version is nil:
+ // 1. use the decided version if possible
+ // 2. or use the min cluster version
+ if s.cluster.Version() == nil {
+ verStr := version.MinClusterVersion
+ if v != nil {
+ verStr = v.String()
+ }
+ s.GoAttach(func() { s.updateClusterVersionV2(verStr) })
+ continue
+ }
+
+ if v != nil && membership.IsValidVersionChange(s.cluster.Version(), v) {
+ s.GoAttach(func() { s.updateClusterVersionV2(v.String()) })
+ }
+ }
+}
+
+func (s *EtcdServer) updateClusterVersionV2(ver string) {
+ lg := s.Logger()
+ if s.cluster.Version() == nil {
+ lg.Info("使用v2 API 设置初始集群版本", zap.String("cluster-version", version.Cluster(ver)))
+ } else {
+ lg.Info("使用v2 API 更新初始集群版本", zap.String("from", version.Cluster(s.cluster.Version().String())), zap.String("to", version.Cluster(ver)))
+ }
+
+ req := pb.Request{
+ Method: "PUT",
+ Path: membership.StoreClusterVersionKey(), // /0/version
+ Val: ver,
+ }
+
+ ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
+ fmt.Println("start", time.Now())
+ _, err := s.Do(ctx, req)
+ fmt.Println("end", time.Now())
+ cancel()
+
+ switch err {
+ case nil:
+ lg.Info("集群版本已更新", zap.String("cluster-version", version.Cluster(ver)))
+ return
+
+ case ErrStopped:
+ lg.Warn("终止集群版本更新;etcd被停止了", zap.Error(err))
+ return
+
+ default:
+ lg.Warn("集群版本更新失败", zap.Error(err))
+ }
+}
+
+func (s *EtcdServer) monitorDowngrade() {
+ t := s.Cfg.DowngradeCheckTime
+ if t == 0 {
+ return
+ }
+ lg := s.Logger()
+ for {
+ select {
+ case <-time.After(t):
+ case <-s.stopping:
+ return
+ }
+
+ if !s.isLeader() {
+ continue
+ }
+
+ d := s.cluster.DowngradeInfo()
+ if !d.Enabled {
+ continue
+ }
+
+ targetVersion := d.TargetVersion
+ v := semver.Must(semver.NewVersion(targetVersion))
+ if isMatchedVersions(s.Logger(), v, getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) {
+ lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion))
+ ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ if _, err := s.downgradeCancel(ctx); err != nil {
+ lg.Warn("failed to cancel downgrade", zap.Error(err))
+ }
+ cancel()
+ }
+ }
+}
+
+func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
+ switch err {
+ case context.Canceled:
+ return ErrCanceled
+
+ case context.DeadlineExceeded:
+ s.leadTimeMu.RLock()
+ curLeadElected := s.leadElectedTime
+ s.leadTimeMu.RUnlock()
+ prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
+ if start.After(prevLeadLost) && start.Before(curLeadElected) {
+ return ErrTimeoutDueToLeaderFail
+ }
+ lead := types.ID(s.getLead())
+ switch lead {
+ case types.ID(raft.None):
+ // there is currently no leader
+ case s.ID(): // the leader is this member
+ if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { // check whether connected to a quorum of members
+ return ErrTimeoutDueToConnectionLost
+ }
+ default:
+ // check whether connected to the leader since the given time
+ if !isConnectedSince(s.r.transport, start, lead) {
+ return ErrTimeoutDueToConnectionLost
+ }
+ }
+ return ErrTimeout
+
+ default:
+ return err
+ }
+}
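+
+ // Illustrative timing for the DeadlineExceeded branch (assuming the defaults TickMs=100
+ // and ElectionTicks=10): prevLeadLost = leadElectedTime - 2*10*100ms = leadElectedTime - 2s,
+ // so a proposal started in that 2s window before the current leader was elected fails with
+ // ErrTimeoutDueToLeaderFail.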
+
+func (s *EtcdServer) Backend() backend.Backend {
+ s.backendLock.Lock()
+ defer s.backendLock.Unlock()
+ return s.backend
+}
+
+func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
+
+ // restoreAlarms restores the alarm store at startup.
+func (s *EtcdServer) restoreAlarms() error {
+ s.applyV3 = s.newApplierV3()
+ as, err := v3alarm.NewAlarmStore(s.lg, s)
+ if err != nil {
+ return err
+ }
+ s.alarmStore = as
+ // only these two alarm types exist
+ if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
+ s.applyV3 = newApplierV3Capped(s.applyV3)
+ }
+ if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
+ s.applyV3 = newApplierV3Corrupt(s.applyV3)
+ }
+ return nil
+}
+
+// ----------------------------------------- OVER --------------------------------------------------------------
+
+ // GoAttach starts a goroutine attached to the server's wait group.
+func (s *EtcdServer) GoAttach(f func()) {
+ s.wgMu.RLock() // closing the stopping channel is done while holding the write lock
+ defer s.wgMu.RUnlock()
+ select {
+ case <-s.stopping:
+ lg := s.Logger()
+ lg.Warn("etcd 已停止; 跳过 GoAttach")
+ return
+ default:
+ }
+
+ // now safe to add since the wait group's Wait has not started yet
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ f()
+ }()
+}
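+
+ // Illustrative usage sketch (not part of this change): a background task attached via
+ // GoAttach typically also watches StoppingNotify so it exits promptly on shutdown:
+ //
+ //	s.GoAttach(func() {
+ //		ticker := time.NewTicker(time.Second)
+ //		defer ticker.Stop()
+ //		for {
+ //			select {
+ //			case <-ticker.C:
+ //				// periodic work
+ //			case <-s.StoppingNotify():
+ //				return
+ //			}
+ //		}
+ //	})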
+
+ // applyEntryNormal applies an EntryNormal entry to the server's state machine.
+func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
+ shouldApplyV3 := membership.ApplyV2storeOnly
+ index := s.consistIndex.ConsistentIndex()
+ if e.Index > index {
+ // set the consistent index to the current entry's index
+ s.consistIndex.SetConsistentIndex(e.Index, e.Term)
+ shouldApplyV3 = membership.ApplyBoth // store data in both v2store and bolt.db
+ }
+ s.lg.Debug("应用日志", zap.Uint64("consistent-index", index),
+ zap.Uint64("entry-index", e.Index),
+ zap.Bool("should-applyV3", bool(shouldApplyV3)))
+
+ // The raft state machine may produce a noop entry when the leader is confirmed.
+ // Skip it early to avoid potential bugs later.
+ if len(e.Data) == 0 {
+ s.notifyAboutFirstCommitInTerm() // signal the first commit of this term
+ // promote the lessor when the local member is the leader and has finished applying all entries from the previous term
+ if s.isLeader() {
+ // on becoming the leader, promote the lease manager
+ s.lessor.Promote(s.Cfg.ElectionTimeout())
+ }
+ return
+ }
+ // e.Data is serialized from a pb.InternalRaftRequest
+ var raftReq pb.InternalRaftRequest
+ if !pbutil.MaybeUnmarshal(&raftReq, e.Data) {
+ // cannot be unmarshaled as an InternalRaftRequest; fall back to the legacy v2 request format, e.g.
+ // {"ID":7587861231285799684,"Method":"PUT","Path":"/0/version","Val":"3.5.0","Dir":false,"PrevValue":"","PrevIndex":0,"Expiration":0,"Wait":false,"Since":0,"Recursive":false,"Sorted":false,"Quorum":false,"Time":0,"Stream":false}
+ var r pb.Request
+ rp := &r
+ pbutil.MustUnmarshal(rp, e.Data)
+ s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp), shouldApplyV3))
+ fmt.Println("pbutil.MustUnmarshal return")
+ return
+ }
+ // successfully unmarshaled as an InternalRaftRequest, e.g.
+ // {"header":{"ID":7587861231285799685},"put":{"key":"YQ==","value":"Yg=="}}
+ if raftReq.V2 != nil {
+ req := (*RequestV2)(raftReq.V2)
+ s.w.Trigger(req.ID, s.applyV2Request(req, shouldApplyV3))
+ return
+ }
+
+ id := raftReq.ID
+ if id == 0 {
+ id = raftReq.Header.ID
+ }
+
+ var ar *applyResult
+ needResult := s.w.IsRegistered(id)
+ if needResult || !noSideEffect(&raftReq) {
+ if !needResult && raftReq.Txn != nil {
+ removeNeedlessRangeReqs(raftReq.Txn)
+ }
+ ar = s.applyV3.Apply(&raftReq, shouldApplyV3)
+ }
+
+ if !shouldApplyV3 { // whether to also store into bolt.db
+ return
+ }
+
+ if ar == nil {
+ return
+ }
+
+ if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
+ s.w.Trigger(id, ar)
+ return
+ }
+
+ lg := s.Logger()
+ lg.Warn("消息超过了后端配额;发出警报", zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
+ zap.Error(ar.err),
+ )
+
+ s.GoAttach(func() {
+ a := &pb.AlarmRequest{
+ MemberID: uint64(s.ID()),
+ Action: pb.AlarmRequest_ACTIVATE, // activate the alarm when the entry is applied
+ Alarm: pb.AlarmType_NOSPACE,
+ }
+ s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+ s.w.Trigger(id, ar)
+ })
+}
+
+ // notifyAboutFirstCommitInTerm signals the first commit of the current term.
+func (s *EtcdServer) notifyAboutFirstCommitInTerm() {
+ newNotifier := make(chan struct{})
+ s.firstCommitInTermMu.Lock()
+ notifierToClose := s.firstCommitInTermC
+ // used for answering read-only requests
+ s.firstCommitInTermC = newNotifier
+ s.firstCommitInTermMu.Unlock()
+ close(notifierToClose)
+}
+
+ // IsLearner reports whether the local member is a raft learner.
+func (s *EtcdServer) IsLearner() bool {
+ return s.cluster.IsLocalMemberLearner()
+}
+
+// IsMemberExist returns if the member with the given id exists in cluster.
+func (s *EtcdServer) IsMemberExist(id types.ID) bool {
+ return s.cluster.IsMemberExist(id)
+}
+
+ // raftStatus returns the raft status of the local node.
+func (s *EtcdServer) raftStatus() raft.Status {
+ return s.r.RaftNodeInterFace.Status()
+}
+
+ // maybeDefragBackend defragments the backend at bootstrap if enough space can be reclaimed.
+func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
+ size := be.Size()
+ sizeInUse := be.SizeInUse()
+ freeableMemory := uint(size - sizeInUse) // reclaimable space
+ thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024
+ if freeableMemory < thresholdBytes {
+ cfg.Logger.Info("跳过碎片整理",
+ zap.Int64("current-db-size-bytes", size),
+ zap.String("current-db-size", humanize.Bytes(uint64(size))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
+ zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
+ zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
+ )
+ return nil
+ }
+ return be.Defrag()
+}
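+
+ // Illustrative example (hypothetical numbers): with a 2 GiB db file of which 1.2 GiB is
+ // in use, freeableMemory is about 0.8 GiB; if
+ // --experimental-bootstrap-defrag-threshold-megabytes=500, thresholdBytes is
+ // 500*1024*1024 (~0.49 GiB), so Defrag() runs at bootstrap.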
+
+ // applyConfChange applies a ConfChange to the current raft node; it must already be committed.
+func (s *EtcdServer) applyConfChange(cc raftpb.ConfChangeV1, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) {
+ if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
+ cc.NodeID = raft.None // a ConfChange with NodeID None is not processed
+ s.r.ApplyConfChange(cc)
+ return false, err
+ }
+
+ lg := s.Logger()
+ *confState = *s.r.ApplyConfChange(cc) // the configuration after the change takes effect
+ s.beHooks.SetConfState(confState)
+ switch cc.Type {
+ // the quorum.JointConfig and peer info recorded in the cluster have already been updated
+ case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
+ confChangeContext := new(membership.ConfigChangeContext)
+ if err := json.Unmarshal([]byte(cc.Context), confChangeContext); err != nil {
+ lg.Panic("发序列化成员失败", zap.Error(err))
+ }
+ if cc.NodeID != uint64(confChangeContext.Member.ID) {
+ lg.Panic("得到不同的成员ID",
+ zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
+ zap.String("member-id-from-message", confChangeContext.Member.ID.String()),
+ )
+ }
+ if confChangeContext.IsPromote { // whether this is a learner promotion
+ s.cluster.PromoteMember(confChangeContext.Member.ID, shouldApplyV3)
+ } else {
+ s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3) // 添加节点 /0/members/8e9e05c52164694d
+ if confChangeContext.Member.ID != s.id { // 不是本实例
+ s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
+ }
+ }
+
+ case raftpb.ConfChangeRemoveNode:
+ id := types.ID(cc.NodeID)
+ s.cluster.RemoveMember(id, shouldApplyV3) // ✅
+ if id == s.id {
+ return true, nil
+ }
+ s.r.transport.RemovePeer(id)
+
+ case raftpb.ConfChangeUpdateNode:
+ m := new(membership.Member)
+ if err := json.Unmarshal([]byte(cc.Context), m); err != nil {
+ lg.Panic("反序列化失败", zap.Error(err))
+ }
+ if cc.NodeID != uint64(m.ID) {
+ lg.Panic("得到了一个不同的ID",
+ zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
+ zap.String("member-id-from-message", m.ID.String()),
+ )
+ }
+ s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3)
+ if m.ID != s.id {
+ s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
+ }
+ }
+ return false, nil
+}
+
+ // Alarms returns all alarms.
+func (s *EtcdServer) Alarms() []*pb.AlarmMember {
+ return s.alarmStore.Get(pb.AlarmType_NONE)
+}
+
+func (s *EtcdServer) setCommittedIndex(v uint64) {
+ atomic.StoreUint64(&s.committedIndex, v)
+}
+
+func (s *EtcdServer) getCommittedIndex() uint64 {
+ return atomic.LoadUint64(&s.committedIndex)
+}
+
+func (s *EtcdServer) setAppliedIndex(v uint64) {
+ atomic.StoreUint64(&s.appliedIndex, v)
+}
+
+func (s *EtcdServer) getAppliedIndex() uint64 {
+ return atomic.LoadUint64(&s.appliedIndex)
+}
+
+func (s *EtcdServer) setTerm(v uint64) {
+ atomic.StoreUint64(&s.term, v)
+}
+
+func (s *EtcdServer) getTerm() uint64 {
+ return atomic.LoadUint64(&s.term)
+}
+
+func (s *EtcdServer) setLead(v uint64) {
+ atomic.StoreUint64(&s.lead, v)
+}
+
+func (s *EtcdServer) getLead() uint64 {
+ return atomic.LoadUint64(&s.lead)
+}
+
+func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
+
+func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
+
+func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} {
+ s.leaderChangedMu.RLock()
+ defer s.leaderChangedMu.RUnlock()
+ return s.leaderChanged
+}
+
+func (s *EtcdServer) KV() mvcc.WatchableKV { return s.kv }
+
+ // Process takes a raft message and applies it to the server's raft state machine, using the timeout in ctx.
+func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
+ lg := s.Logger()
+ // reject messages from members that have been removed from the cluster
+ if s.cluster.IsIDRemoved(types.ID(m.From)) {
+ lg.Warn("拒绝来自被删除的成员的raft的信息",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("removed-member-id", types.ID(m.From).String()),
+ )
+ return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
+ }
+ // log append messages (replication / configuration-change requests)
+ if m.Type == raftpb.MsgApp {
+ s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
+ }
+ var _ raft.RaftNodeInterFace = raftNode{}
+ //_ = raftNode{}.Step
+ return s.r.Step(ctx, m)
+}
+
+func (s *EtcdServer) ClusterVersion() *semver.Version {
+ if s.cluster == nil {
+ return nil
+ }
+ return s.cluster.Version()
+}
diff --git a/server/etcdserver/snapshot_merge.go b/etcd/etcdserver/snapshot_merge.go
similarity index 90%
rename from server/etcdserver/snapshot_merge.go
rename to etcd/etcdserver/snapshot_merge.go
index 963ead5a7e2..0e2f7785893 100644
--- a/server/etcdserver/snapshot_merge.go
+++ b/etcd/etcdserver/snapshot_merge.go
@@ -17,9 +17,9 @@ package etcdserver
import (
"io"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/storage/backend"
- "go.etcd.io/raft/v3/raftpb"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"
@@ -39,7 +39,7 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi
// commit kv to write metadata(for example: consistent index).
s.KV().Commit()
- dbsnap := s.be.Snapshot()
+ dbsnap := s.backend.Snapshot()
// get a snapshot of v3 KV as readCloser
rc := newSnapshotReaderCloser(lg, dbsnap)
@@ -53,9 +53,7 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi
},
Data: d,
}
- m.Snapshot = &snapshot
-
- verifySnapshotIndex(snapshot, s.consistIndex.ConsistentIndex())
+ m.Snapshot = snapshot
return *snap.NewMessage(m, rc, dbsnap.Size())
}
diff --git a/etcd/etcdserver/storage.go b/etcd/etcdserver/storage.go
new file mode 100644
index 00000000000..d8a2678008d
--- /dev/null
+++ b/etcd/etcdserver/storage.go
@@ -0,0 +1,122 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "io"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/etcd/wal"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+ "go.uber.org/zap"
+)
+
+type Storage interface {
+ // Save function saves ents and state to the underlying stable storage.
+ // Save MUST block until st and ents are on stable storage.
+ Save(st raftpb.HardState, ents []raftpb.Entry) error
+ // SaveSnap function saves snapshot to the underlying stable storage.
+ SaveSnap(snap raftpb.Snapshot) error
+ // Close closes the Storage and performs finalization.
+ Close() error
+ // Release releases the locked wal files older than the provided snapshot.
+ Release(snap raftpb.Snapshot) error
+ // Sync WAL
+ Sync() error
+}
+
+ // storage persists data to disk; it wraps the WAL and the Snapshotter.
+type storage struct {
+ *wal.WAL
+ *snap.Snapshotter
+}
+
+func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {
+ return &storage{w, s}
+}
+
+// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry.
+func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
+ walsnap := walpb.Snapshot{
+ Index: snap.Metadata.Index,
+ Term: snap.Metadata.Term,
+ ConfState: &snap.Metadata.ConfState,
+ }
+ // save the snapshot file before writing the snapshot to the wal.
+ // This makes it possible for the snapshot file to become orphaned, but prevents
+ // a WAL snapshot entry from having no corresponding snapshot file.
+ err := st.Snapshotter.SaveSnap(snap)
+ if err != nil {
+ return err
+ }
+ // gofail: var raftBeforeWALSaveSnaphot struct{}
+
+ return st.WAL.SaveSnapshot(walsnap)
+}
+
+// Release releases resources older than the given snap and are no longer needed:
+// - releases the locks to the wal files that are older than the provided wal for the given snap.
+// - deletes any .snap.db files that are older than the given snap.
+func (st *storage) Release(snap raftpb.Snapshot) error {
+ if err := st.WAL.ReleaseLockTo(snap.Metadata.Index); err != nil {
+ return err
+ }
+ return st.Snapshotter.ReleaseSnapDBs(snap)
+}
+
+// readWAL reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
+// after the position of the given snap in the WAL.
+// The snap must have been previously saved to the WAL, or this call will panic.
+func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot, unsafeNoFsync bool) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
+ var (
+ err error
+ wmetadata []byte
+ )
+
+ repaired := false
+ for {
+ if w, err = wal.Open(lg, waldir, snap); err != nil {
+ lg.Fatal("failed to open WAL", zap.Error(err))
+ }
+ if unsafeNoFsync {
+ w.SetUnsafeNoFsync()
+ }
+ if wmetadata, st, ents, err = w.ReadAll(); err != nil {
+ w.Close()
+ // we can only repair ErrUnexpectedEOF and we never repair twice.
+ if repaired || err != io.ErrUnexpectedEOF {
+ lg.Fatal("failed to read WAL, cannot backend repaired", zap.Error(err))
+ }
+ if !wal.Repair(lg, waldir) {
+ lg.Fatal("failed to repair WAL", zap.Error(err))
+ } else {
+ lg.Info("repaired WAL", zap.Error(err))
+ repaired = true
+ }
+ continue
+ }
+ break
+ }
+ var metadata pb.Metadata
+ pbutil.MustUnmarshal(&metadata, wmetadata)
+ id = types.ID(metadata.NodeID)
+ cid = types.ID(metadata.ClusterID)
+ return w, id, cid, st, ents
+}
diff --git a/etcd/etcdserver/util.go b/etcd/etcdserver/util.go
new file mode 100644
index 00000000000..ddeefb6789e
--- /dev/null
+++ b/etcd/etcdserver/util.go
@@ -0,0 +1,100 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/rafthttp"
+ "go.uber.org/zap"
+)
+
+ // isConnectedToQuorumSince checks whether the local member has been connected to a quorum of the cluster since the given time.
+func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+ return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1 // 2.5
+}
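+
+ // Illustrative example: for a 5-member cluster, (len(members)/2)+1 = 3, so the local
+ // member (which always counts itself) must have been connected to at least 2 other
+ // members since the given time to be considered connected to a quorum.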
+
+ // isConnectedSince checks whether the local member has been connected to the given remote member since the given time.
+func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
+ t := transport.ActiveSince(remote)
+ return !t.IsZero() && t.Before(since)
+}
+
+ // isConnectedFullySince checks whether the local member has been connected to all members since the given time.
+func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+ return numConnectedSince(transport, since, self, members) == len(members)
+}
+
+// longestConnected chooses the member with longest active-since-time.
+// It returns false, if nothing is active.
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
+ var longest types.ID
+ var oldest time.Time
+ for _, id := range membs {
+ tm := tp.ActiveSince(id)
+ if tm.IsZero() { // inactive
+ continue
+ }
+
+ if oldest.IsZero() { // first longest candidate
+ oldest = tm
+ longest = id
+ }
+
+ if tm.Before(oldest) {
+ oldest = tm
+ longest = id
+ }
+ }
+ if uint64(longest) == 0 {
+ return longest, false
+ }
+ return longest, true
+}
+
+func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+ var resp string
+ if !isNil(respMsg) {
+ resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+ }
+ d := time.Since(now)
+ lg.Warn(
+ "failed to apply request",
+ zap.Duration("took", d),
+ zap.String("request", reqStringer.String()),
+ zap.String("response", resp),
+ zap.Error(err),
+ )
+}
+
+func isNil(msg proto.Message) bool {
+ return msg == nil || reflect.ValueOf(msg).IsNil()
+}
+
+ // numConnectedSince counts how many members have been connected to the local member since the given time.
+func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
+ connectedNum := 0
+ for _, m := range members {
+ if m.ID == self || isConnectedSince(transport, since, m.ID) {
+ connectedNum++
+ }
+ }
+ return connectedNum
+}
diff --git a/server/etcdserver/v2_server.go b/etcd/etcdserver/v2_server.go
similarity index 84%
rename from server/etcdserver/v2_server.go
rename to etcd/etcdserver/v2_server.go
index 517d7ca7f70..89b007b21f0 100644
--- a/server/etcdserver/v2_server.go
+++ b/etcd/etcdserver/v2_server.go
@@ -16,12 +16,12 @@ package etcdserver
import (
"context"
+ "fmt"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)
type RequestV2 pb.Request
@@ -100,26 +100,27 @@ func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *Requ
if err != nil {
return Response{}, err
}
+ /* register and create a channel; the ID is regenerated for every request */
ch := a.s.w.Register(r.ID)
start := time.Now()
a.s.r.Propose(ctx, data)
- proposalsPending.Inc()
- defer proposalsPending.Dec()
-
+ _ = a.s.applyEntryNormal
select {
case x := <-ch:
resp := x.(Response)
return resp, resp.Err
case <-ctx.Done():
- proposalsFailed.Inc()
- a.s.w.Trigger(r.ID, nil) // GC wait
+ a.s.w.Trigger(r.ID, nil)
return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
- case <-a.s.stopping:
+ case x := <-a.s.stopping:
+ fmt.Println("<-a.s.stopping", x)
}
- return Response{}, errors.ErrStopped
+ return Response{}, ErrStopped
}
+// --------------- over ------------------------
+
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
r.ID = s.reqIDGen.Next()
h := &reqV2HandlerEtcdServer{
@@ -135,11 +136,6 @@ func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
return resp, err
}
-// Handle interprets r and performs an operation on s.store according to r.Method
-// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
-// Quorum == true, r will be sent through consensus before performing its
-// respective operation. Do will block until an action is performed or there is
-// an error.
func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
if r.Method == "GET" && r.Quorum {
r.Method = "QGET"
@@ -158,7 +154,7 @@ func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Respons
case "HEAD":
return v2api.Head(ctx, r)
}
- return Response{}, errors.ErrUnknownMethod
+ return Response{}, ErrUnknownMethod
}
func (r *RequestV2) String() string {
diff --git a/etcd/etcdserver/v3_server.go b/etcd/etcdserver/v3_server.go
new file mode 100644
index 00000000000..e9b66e747b3
--- /dev/null
+++ b/etcd/etcdserver/v3_server.go
@@ -0,0 +1,452 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/membershippb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+)
+
+const (
+ // In the healthy case, there might be a small gap (tens of entries) between
+ // the applied index and the committed index.
+ // However, if the committed entries are very heavy to apply, the gap might grow.
+ // We should stop accepting new proposals if the gap grows beyond a certain point.
+ maxGapBetweenApplyAndCommitIndex = 5000
+ readIndexRetryTime = 500 * time.Millisecond
+)
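+
+ // Illustrative example of the gap check (using the constants above): if the committed
+ // index is 105001 and the applied index is 100000, the gap of 5001 exceeds
+ // maxGapBetweenApplyAndCommitIndex (5000), so new proposals are rejected with
+ // ErrTooManyRequests until apply catches up.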
+
+type Authenticator interface {
+ AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
+ AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
+ AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error)
+ Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
+ UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+ RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+func isTxnSerializable(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ return true
+}
+
+func isTxnReadonly(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ return true
+}
+
+// Watchable returns a watchable interface attached to the etcdserver.
+func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
+
+func isStopped(err error) bool {
+ return err == raft.ErrStopped || err == ErrStopped
+}
+
+func uint64ToBigEndianBytes(number uint64) []byte {
+ byteResult := make([]byte, 8)
+ binary.BigEndian.PutUint64(byteResult, number)
+ return byteResult
+}
+
+func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ switch r.Action {
+ case pb.DowngradeRequest_VALIDATE:
+ return s.downgradeValidate(ctx, r.Version)
+ case pb.DowngradeRequest_ENABLE:
+ return s.downgradeEnable(ctx, r)
+ case pb.DowngradeRequest_CANCEL:
+ return s.downgradeCancel(ctx)
+ default:
+ return nil, ErrUnknownMethod
+ }
+}
+
+func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.DowngradeResponse, error) {
+ resp := &pb.DowngradeResponse{}
+
+ targetVersion, err := convertToClusterVersion(v)
+ if err != nil {
+ return nil, err
+ }
+
+ // get the leader's commit index and wait for the local store to finish applying up to that index
+ // to avoid using stale downgrade information
+ err = s.linearizeReadNotify(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ cv := s.ClusterVersion()
+ if cv == nil {
+ return nil, ErrClusterVersionUnavailable
+ }
+ resp.Version = cv.String()
+
+ allowedTargetVersion := membership.AllowedDowngradeVersion(cv)
+ if !targetVersion.Equal(*allowedTargetVersion) {
+ return nil, ErrInvalidDowngradeTargetVersion
+ }
+
+ downgradeInfo := s.cluster.DowngradeInfo()
+ if downgradeInfo.Enabled {
+ // Todo: return the downgrade status along with the error msg
+ return nil, ErrDowngradeInProcess
+ }
+ return resp, nil
+}
+
+func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ // validate downgrade capability before starting downgrade
+ v := r.Version
+ lg := s.Logger()
+ if resp, err := s.downgradeValidate(ctx, v); err != nil {
+ lg.Warn("reject downgrade request", zap.Error(err))
+ return resp, err
+ }
+ targetVersion, err := convertToClusterVersion(v)
+ if err != nil {
+ lg.Warn("reject downgrade request", zap.Error(err))
+ return nil, err
+ }
+
+ raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()}
+ _, err = s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ if err != nil {
+ lg.Warn("reject downgrade request", zap.Error(err))
+ return nil, err
+ }
+ resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()}
+ return &resp, nil
+}
+
+func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) {
+ // get the leader's commit index and wait for the local store to finish applying up to that index
+ // to avoid using stale downgrade information
+ if err := s.linearizeReadNotify(ctx); err != nil {
+ return nil, err
+ }
+
+ downgradeInfo := s.cluster.DowngradeInfo()
+ if !downgradeInfo.Enabled {
+ return nil, ErrNoInflightDowngrade
+ }
+
+ raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false}
+ _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ if err != nil {
+ return nil, err
+ }
+ resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()}
+ return &resp, nil
+}
+
+// ---------------------------------------- OVER ------------------------------------------------------------
+
+ // AuthInfoFromCtx extracts authentication information from the context.
+func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
+ authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) // user authentication
+ if authInfo != nil || err != nil {
+ return authInfo, err
+ }
+ if !s.Cfg.ClientCertAuthEnabled { // whether to verify client certificates
+ return nil, nil
+ }
+ authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
+ return authInfo, nil
+}
+
+ // doSerialize handles auth for the serialized request "get", with permissions checked by "chk". It returns a non-nil error if authentication fails.
+func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
+ trace := traceutil.Get(ctx) // get the trace from the context
+ ai, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+ if ai == nil {
+ // chk expects a non-nil AuthInfo; use empty credentials
+ ai = &auth.AuthInfo{}
+ }
+ // check permissions
+ if err = chk(ai); err != nil {
+ return err
+ }
+ trace.Step("获取认证元数据")
+ // fetch the response for the serialized request
+ get()
+ // if the auth store was updated while processing the request, check for a stale token revision.
+ if ai.Revision != 0 && ai.Revision != s.authStore.Revision() {
+ // During apply, the server rejects a write if the auth revision carried in the raft entry
+ // is lower than the current auth revision.
+ // Here the request's auth revision is lower than this node's current auth revision.
+ return auth.ErrAuthOldRevision
+ }
+ return nil
+}
+
+ // raftRequest is the externally facing entry point for internal raft requests.
+func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+ return s.raftRequestOnce(ctx, r)
+}
+
+ // raftRequestOnce proposes the request once and waits for the apply result.
+func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+ result, err := s.processInternalRaftRequestOnce(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+ if result.err != nil {
+ return nil, result.err
+ }
+ // startTime
+ startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time)
+ if ok && result.trace != nil {
+ applyStart := result.trace.GetStartTime()
+ result.trace.SetStartTime(startTime)
+ result.trace.InsertStep(0, applyStart, "处理raft请求")
+ }
+ marshal, _ := json.Marshal(result.trace)
+ fmt.Println("trace--->", string(marshal))
+ return result.resp, nil
+}
+
+ // processInternalRaftRequestOnce handles a client request that mutates state.
+func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
+ // check whether the number of committed-but-unapplied entries exceeds the limit
+ ai := s.getAppliedIndex()
+ ci := s.getCommittedIndex()
+ if ci > ai+maxGapBetweenApplyAndCommitIndex {
+ return nil, ErrTooManyRequests
+ }
+
+ r.Header = &pb.RequestHeader{
+ ID: s.reqIDGen.Next(), // generate a request ID
+ }
+
+ // check auth info if this is not an InternalAuthenticateRequest
+ if r.Authenticate == nil {
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if authInfo != nil {
+ r.Header.Username = authInfo.Username
+ r.Header.AuthRevision = authInfo.Revision
+ }
+ }
+ // serialize the request
+
+ data, err := r.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) > int(s.Cfg.MaxRequestBytes) {
+ return nil, ErrRequestTooLarge
+ }
+
+ id := r.ID // 0
+ if id == 0 {
+ id = r.Header.ID
+ }
+ ch := s.w.Register(id) // register a channel to wait for the apply result
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) // set the request timeout
+ // cctx, cancel := context.WithTimeout(ctx, time.Second*1000) // set the request timeout
+ defer cancel()
+
+ start := time.Now()
+ _ = s.applyEntryNormal
+ err = s.r.Propose(cctx, data) // propose through the raft module; the entry is put into the pending-send queue
+ if err != nil {
+ s.w.Trigger(id, nil)
+ return nil, err
+ }
+
+ select {
+ // wait for the apply result and return it to the client
+ case x := <-ch:
+ return x.(*applyResult), nil
+ case <-cctx.Done():
+ s.w.Trigger(id, nil)
+ return nil, s.parseProposeCtxErr(cctx.Err(), start)
+ case <-s.done:
+ return nil, ErrStopped
+ }
+}
+
+ // Apply is the entry point that dispatches internal raft requests.
+func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
+ ar := &applyResult{}
+ defer func(start time.Time) {
+ success := ar.err == nil || ar.err == mvcc.ErrCompacted
+ if !success {
+ warnOfFailedRequest(a.s.Logger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
+ }
+ }(time.Now())
+
+ switch {
+ case r.ClusterVersionSet != nil: // implemented in 3.5.x
+ // set the cluster version
+ a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
+ return nil
+ case r.ClusterMemberAttrSet != nil:
+ // set cluster member attributes
+ a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
+ return nil
+ case r.DowngradeInfoSet != nil:
+ // member downgrade
+ a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
+ return nil
+ }
+
+ if !shouldApplyV3 {
+ return nil
+ }
+
+ switch {
+ case r.Range != nil:
+ ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range) // ✅
+ case r.Put != nil:
+ ar.resp, ar.trace, ar.err = a.s.applyV3.Put(context.TODO(), nil, r.Put) // ✅
+ case r.DeleteRange != nil:
+ ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) // ✅
+ case r.Txn != nil:
+ ar.resp, ar.trace, ar.err = a.s.applyV3.Txn(context.TODO(), r.Txn)
+ case r.Compaction != nil:
+ ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction) // ✅ compact historical kv revisions
+ case r.LeaseGrant != nil:
+ ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) // ✅ create a lease
+ case r.LeaseRevoke != nil:
+ ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) // ✅ revoke a lease
+ case r.LeaseCheckpoint != nil:
+ // avoid lease TTL resets caused by leader changes
+ ar.resp, ar.err = a.s.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) // ✅
+ case r.Alarm != nil:
+ ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) // ✅
+ case r.Authenticate != nil:
+ ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) // ✅
+ case r.AuthEnable != nil:
+ ar.resp, ar.err = a.s.applyV3.AuthEnable() // ✅
+ case r.AuthDisable != nil:
+ ar.resp, ar.err = a.s.applyV3.AuthDisable() // ✅
+ case r.AuthStatus != nil:
+ ar.resp, ar.err = a.s.applyV3.AuthStatus() // ✅
+ case r.AuthUserAdd != nil:
+ ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) // ✅
+ case r.AuthUserDelete != nil:
+ ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) // ✅
+ case r.AuthUserChangePassword != nil:
+ ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) // ✅
+ case r.AuthUserGrantRole != nil:
+ ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) // ✅
+ case r.AuthUserGet != nil:
+ ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) // ✅
+ case r.AuthUserRevokeRole != nil:
+ ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) // ✅
+ case r.AuthUserList != nil:
+ ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) // ✅
+ case r.AuthRoleAdd != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) // ✅
+ case r.AuthRoleGrantPermission != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) // ✅
+ case r.AuthRoleGet != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) // ✅
+ case r.AuthRoleRevokePermission != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) // ✅
+ case r.AuthRoleDelete != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) // ✅
+ case r.AuthRoleList != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) // ✅
+ default:
+ a.s.lg.Panic("没有实现应用", zap.Stringer("raft-request", r))
+ }
+ return ar
+}
+
+// waitLeader blocks until a leader with advertised peer URLs is known.
+func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
+ leader := s.cluster.Member(s.Leader())
+ for leader == nil {
+ // wait for one election timeout before checking again
+ dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
+ select {
+ case <-time.After(dur):
+ leader = s.cluster.Member(s.Leader())
+ case <-s.stopping:
+ return nil, ErrStopped
+ case <-ctx.Done():
+ return nil, ErrNoLeader
+ }
+ }
+ if leader == nil || len(leader.PeerURLs) == 0 {
+ return nil, ErrNoLeader
+ }
+ return leader, nil
+}
+
+// Alarm activates or deactivates cluster alarms via a raft request.
+func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ req := pb.InternalRaftRequest{Alarm: r}
+ // marshal, _ := json.Marshal(req)
+ // fmt.Println("marshal-->",string(marshal))
+ resp, err := s.raftRequestOnce(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AlarmResponse), nil
+}
diff --git a/etcd/etcdserver/v3service_auth.go b/etcd/etcdserver/v3service_auth.go
new file mode 100644
index 00000000000..83e62bb5ae2
--- /dev/null
+++ b/etcd/etcdserver/v3service_auth.go
@@ -0,0 +1,230 @@
+package etcdserver
+
+import (
+ "context"
+ "encoding/base64"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/ls-2018/etcd_cn/etcd/auth"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc"
+)
+
+type AuthClient interface {
+ Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error)
+ AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error)
+ AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error)
+ AuthStatus(ctx context.Context, in *pb.AuthStatusRequest, opts ...grpc.CallOption) (*pb.AuthStatusResponse, error)
+ UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error)
+ UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error)
+ UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error)
+ UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error)
+ UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error)
+ RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error)
+ RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error)
+ RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error)
+ RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error)
+}
+
+func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthEnable: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthEnableResponse), nil
+}
+
+func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthDisableResponse), nil
+}
+
+func (s *EtcdServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthStatus: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthStatusResponse), nil
+}
+
+func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ if err := s.linearizeReadNotify(ctx); err != nil {
+ return nil, err
+ }
+
+ lg := s.Logger()
+
+ var resp proto.Message
+ for {
+ checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
+ if err != nil {
+ if err != auth.ErrAuthNotEnabled {
+ lg.Warn(
+ "invalid authentication was requested",
+ zap.String("user", r.Name),
+ zap.Error(err),
+ )
+ }
+ return nil, err
+ }
+
+ st, err := s.AuthStore().GenTokenPrefix()
+ if err != nil {
+ return nil, err
+ }
+
+ // internalReq doesn't need to have Password because the above s.AuthStore().CheckPassword() already did it.
+ // In addition, it will let a WAL entry not record password as a plain text.
+ internalReq := &pb.InternalAuthenticateRequest{
+ Name: r.Name,
+ SimpleToken: st,
+ }
+
+ resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
+ if err != nil {
+ return nil, err
+ }
+ if checkedRevision == s.AuthStore().Revision() {
+ break
+ }
+
+ lg.Info("revision when password checked became stale; retrying")
+ }
+
+ return resp.(*pb.AuthenticateResponse), nil
+}
+
+// ------------------------------------------- OVER ---------------------------------------------------------vv
+
+func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ if r.Options == nil || !r.Options.NoPassword {
+ hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost())
+ if err != nil {
+ return nil, err
+ }
+ r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword)
+ r.Password = ""
+ }
+
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserAddResponse), nil
+}
+
+func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserDeleteResponse), nil
+}
+
+func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ if r.Password != "" {
+ hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost())
+ if err != nil {
+ return nil, err
+ }
+ r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword)
+ r.Password = ""
+ }
+
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserChangePasswordResponse), nil
+}
+
+func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserGrantRoleResponse), nil
+}
+
+func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserGetResponse), nil
+}
+
+func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserListResponse), nil
+}
+
+func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserRevokeRoleResponse), nil
+}
+
+// ------------------------------------------- OVER ---------------------------------------------------------vv
+
+func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleGrantPermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleGetResponse), nil
+}
+
+func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleListResponse), nil
+}
+
+func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleRevokePermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleDeleteResponse), nil
+}
+
+func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleAddResponse), nil
+}
diff --git a/server/etcdserver/zap_raft.go b/etcd/etcdserver/zap_raft.go
similarity index 94%
rename from server/etcdserver/zap_raft.go
rename to etcd/etcdserver/zap_raft.go
index 66dd3caad0d..69540c71189 100644
--- a/server/etcdserver/zap_raft.go
+++ b/etcd/etcdserver/zap_raft.go
@@ -17,7 +17,7 @@ package etcdserver
import (
"errors"
- "go.etcd.io/raft/v3"
+ "github.com/ls-2018/etcd_cn/raft"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
@@ -37,8 +37,7 @@ func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
- skipCallerLg := lg.WithOptions(zap.AddCallerSkip(1))
- return &zapRaftLogger{lg: skipCallerLg, sugar: skipCallerLg.Sugar()}
+ return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
}
// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
diff --git a/etcd/lease/leasehttp/over_http.go b/etcd/lease/leasehttp/over_http.go
new file mode 100644
index 00000000000..88da7bebe59
--- /dev/null
+++ b/etcd/lease/leasehttp/over_http.go
@@ -0,0 +1,242 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasehttp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/lease/leasepb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/httputil"
+)
+
+var (
+ LeasePrefix = "/leases"
+ LeaseInternalPrefix = "/leases/internal"
+ applyTimeout = time.Second
+ ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
+)
+
+func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
+ return &leaseHandler{l, waitch}
+}
+
+type leaseHandler struct {
+ l lease.Lessor
+ waitch func() <-chan struct{}
+}
+
+func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ defer r.Body.Close()
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "error reading body", http.StatusBadRequest)
+ return
+ }
+
+ var v []byte
+ switch r.URL.Path {
+ case LeasePrefix:
+ lreq := pb.LeaseKeepAliveRequest{}
+ if uerr := lreq.Unmarshal(b); uerr != nil {
+ http.Error(w, "反序列失败", http.StatusBadRequest)
+ return
+ }
+ select {
+ case <-h.waitch():
+ case <-time.After(applyTimeout):
+ http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+ return
+ }
+ ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID))
+ if rerr != nil {
+ if rerr == lease.ErrLeaseNotFound {
+ http.Error(w, rerr.Error(), http.StatusNotFound)
+ return
+ }
+
+ http.Error(w, rerr.Error(), http.StatusBadRequest)
+ return
+ }
+ // TODO: fill in the ResponseHeader
+ resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
+ v, err = resp.Marshal()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ case LeaseInternalPrefix:
+ lreq := leasepb.LeaseInternalRequest{}
+ if lerr := lreq.Unmarshal(b); lerr != nil {
+ http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+ return
+ }
+ select {
+ case <-h.waitch():
+ case <-time.After(applyTimeout):
+ http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+ return
+ }
+ l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
+ if l == nil {
+ http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
+ return
+ }
+ resp := &leasepb.LeaseInternalResponse{
+ LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
+ Header: &pb.ResponseHeader{},
+ ID: lreq.LeaseTimeToLiveRequest.ID,
+ TTL: int64(l.Remaining().Seconds()),
+ GrantedTTL: l.TTL(),
+ },
+ }
+ if lreq.LeaseTimeToLiveRequest.Keys {
+ ks := l.Keys()
+ kbs := make([][]byte, len(ks))
+ for i := range ks {
+ kbs[i] = []byte(ks[i])
+ }
+ resp.LeaseTimeToLiveResponse.Keys = kbs
+ }
+
+ v, err = resp.Marshal()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ default:
+ http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/protobuf")
+ w.Write(v)
+}
+
+func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) {
+ lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal()
+ if err != nil {
+ return -1, err
+ }
+
+ cc := &http.Client{Transport: rt}
+ req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+ if err != nil {
+ return -1, err
+ }
+ req.Header.Set("Content-Type", "application/protobuf")
+ req.Cancel = ctx.Done()
+
+ resp, err := cc.Do(req)
+ if err != nil {
+ return -1, err
+ }
+ b, err := readResponse(resp)
+ if err != nil {
+ return -1, err
+ }
+
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return -1, ErrLeaseHTTPTimeout
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return -1, lease.ErrLeaseNotFound
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return -1, fmt.Errorf("lease: unknown error(%s)", string(b))
+ }
+
+ lresp := &pb.LeaseKeepAliveResponse{}
+ if err := lresp.Unmarshal(b); err != nil {
+ return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+ }
+ if lresp.ID != int64(id) {
+ return -1, fmt.Errorf("lease: renew id mismatch")
+ }
+ return lresp.TTL, nil
+}
+
+func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
+ // will post lreq protobuf to leader
+ lreq, err := (&leasepb.LeaseInternalRequest{
+ LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{
+ ID: int64(id),
+ Keys: keys,
+ },
+ }).Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/protobuf")
+
+ req = req.WithContext(ctx)
+
+ cc := &http.Client{Transport: rt}
+ var b []byte
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ b, err = readResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return nil, ErrLeaseHTTPTimeout
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, lease.ErrLeaseNotFound
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("lease: unknown error(%s)", string(b))
+ }
+
+ lresp := &leasepb.LeaseInternalResponse{}
+ if err := lresp.Unmarshal(b); err != nil {
+ return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+ }
+ if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
+ return nil, fmt.Errorf("lease: renew id mismatch")
+ }
+ return lresp, nil
+}
+
+func readResponse(resp *http.Response) (b []byte, err error) {
+ b, err = ioutil.ReadAll(resp.Body)
+ httputil.GracefulClose(resp)
+ return
+}
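+
+// Usage sketch (hypothetical URL and transport): a non-leader member forwarding a
+// keepalive for lease 42 to the leader's lease handler via RenewHTTP.
+//
+//  ttl, err := RenewHTTP(context.TODO(), lease.LeaseID(42),
+//      "http://leader:2380"+LeasePrefix, http.DefaultTransport)
+//  if err == nil {
+//      fmt.Println("renewed, remaining TTL:", ttl)
+//  }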
diff --git a/etcd/lease/leasepb/lease.pb.go b/etcd/lease/leasepb/lease.pb.go
new file mode 100644
index 00000000000..ca416b03ff9
--- /dev/null
+++ b/etcd/lease/leasepb/lease.pb.go
@@ -0,0 +1,142 @@
+// Code generated by protoc-gen-gogo.
+// source: lease.proto
+
+package leasepb
+
+import (
+ "encoding/json"
+ fmt "fmt"
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ etcdserverpb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+var (
+ _ = fmt.Errorf
+ _ = math.Inf
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Lease struct {
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ RemainingTTL int64 `protobuf:"varint,3,opt,name=RemainingTTL,proto3" json:"RemainingTTL,omitempty"`
+}
+
+func (m *Lease) Reset() { *m = Lease{} }
+func (m *Lease) String() string { return proto.CompactTextString(m) }
+func (*Lease) ProtoMessage() {}
+func (*Lease) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dd57e402472b33a, []int{0}
+}
+
+type LeaseInternalRequest struct {
+ LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest,proto3" json:"LeaseTimeToLiveRequest,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} }
+func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalRequest) ProtoMessage() {}
+func (*LeaseInternalRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dd57e402472b33a, []int{1}
+}
+
+type LeaseInternalResponse struct {
+ LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse,proto3" json:"LeaseTimeToLiveResponse,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} }
+func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalResponse) ProtoMessage() {}
+func (*LeaseInternalResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dd57e402472b33a, []int{2}
+}
+
+func init() {
+ proto.RegisterType((*Lease)(nil), "leasepb.Lease")
+ proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest")
+ proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse")
+}
+
+func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) }
+
+var fileDescriptor_3dd57e402472b33a = []byte{
+ // 256 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c,
+ 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2,
+ 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x3e, 0xb5, 0x24, 0x39, 0x45,
+ 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2, 0x2f,
+ 0x2a, 0x48, 0x86, 0x28, 0x50, 0xf2, 0xe5, 0x62, 0xf5, 0x01, 0x99, 0x20, 0xc4, 0xc7, 0xc5, 0xe4,
+ 0xe9, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x1c, 0xc4, 0xe4, 0xe9, 0x22, 0x24, 0xc0, 0xc5, 0x1c,
+ 0x12, 0xe2, 0x23, 0xc1, 0x04, 0x16, 0x00, 0x31, 0x85, 0x94, 0xb8, 0x78, 0x82, 0x52, 0x73, 0x13,
+ 0x33, 0xf3, 0x32, 0xf3, 0xd2, 0x41, 0x52, 0xcc, 0x60, 0x29, 0x14, 0x31, 0xa5, 0x12, 0x2e, 0x11,
+ 0xb0, 0x71, 0x9e, 0x79, 0x25, 0xa9, 0x45, 0x79, 0x89, 0x39, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5,
+ 0x25, 0x42, 0x31, 0x5c, 0x62, 0x60, 0xf1, 0x90, 0xcc, 0xdc, 0xd4, 0x90, 0x7c, 0x9f, 0xcc, 0xb2,
+ 0x54, 0xa8, 0x0c, 0xd8, 0x46, 0x6e, 0x23, 0x15, 0x3d, 0x64, 0xf7, 0xe9, 0x61, 0x57, 0x1b, 0x84,
+ 0xc3, 0x0c, 0xa5, 0x0a, 0x2e, 0x51, 0x34, 0x5b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe2,
+ 0xb9, 0xc4, 0x31, 0xb4, 0x40, 0xa4, 0xa0, 0xf6, 0xaa, 0x12, 0xb0, 0x17, 0xa2, 0x38, 0x08, 0x97,
+ 0x29, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3,
+ 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x0e,
+ 0x5f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x8a, 0x94, 0xb9, 0xae, 0x01, 0x00, 0x00,
+}
+
+func (m *Lease) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
+
+func (m *Lease) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *LeaseInternalRequest) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *LeaseInternalResponse) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *Lease) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+var (
+ ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowLease = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/server/lease/leasepb/lease.proto b/etcd/lease/leasepb/lease.proto
similarity index 100%
rename from server/lease/leasepb/lease.proto
rename to etcd/lease/leasepb/lease.proto
diff --git a/etcd/lease/over_lease_queue.go b/etcd/lease/over_lease_queue.go
new file mode 100644
index 00000000000..38aa5b6829c
--- /dev/null
+++ b/etcd/lease/over_lease_queue.go
@@ -0,0 +1,111 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "container/heap"
+ "time"
+)
+
+type LeaseWithTime struct {
+ id LeaseID
+ time time.Time
+ index int
+}
+
+type LeaseQueue []*LeaseWithTime
+
+func (pq LeaseQueue) Len() int { return len(pq) }
+
+func (pq LeaseQueue) Less(i, j int) bool {
+ return pq[i].time.Before(pq[j].time)
+}
+
+func (pq LeaseQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func (pq *LeaseQueue) Push(x interface{}) {
+ n := len(*pq)
+ item := x.(*LeaseWithTime)
+ item.index = n
+ *pq = append(*pq, item)
+}
+
+func (pq *LeaseQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ item := old[n-1]
+ item.index = -1 // for safety
+ *pq = old[0 : n-1]
+ return item
+}
+
+var _ heap.Interface = &LeaseQueue{}
+
+// ExpiredNotifier is a queue used to notify the lessor to remove expired leases.
+// It keeps at most one entry per lease; RegisterOrUpdate refreshes that entry's expiry time.
+type ExpiredNotifier struct {
+ m map[LeaseID]*LeaseWithTime
+ queue LeaseQueue
+}
+
+// newLeaseExpiredNotifier creates the lease-expiry notifier.
+func newLeaseExpiredNotifier() *ExpiredNotifier {
+ return &ExpiredNotifier{
+ m: make(map[LeaseID]*LeaseWithTime),
+ queue: make(LeaseQueue, 0),
+ }
+}
+
+// Init restores the heap invariant and rebuilds the id->item index.
+func (mq *ExpiredNotifier) Init() {
+ heap.Init(&mq.queue)
+ mq.m = make(map[LeaseID]*LeaseWithTime)
+ for _, item := range mq.queue {
+ mq.m[item.id] = item
+ }
+}
+
+// RegisterOrUpdate registers a lease or updates its expiry time.
+func (mq *ExpiredNotifier) RegisterOrUpdate(item *LeaseWithTime) {
+ if old, ok := mq.m[item.id]; ok {
+ old.time = item.time // refresh the expiry time
+ heap.Fix(&mq.queue, old.index) // Fix restores heap order after the value changed
+ } else {
+ heap.Push(&mq.queue, item) // new entry
+ mq.m[item.id] = item
+ }
+}
+
+func (mq *ExpiredNotifier) Unregister() *LeaseWithTime {
+ item := heap.Pop(&mq.queue).(*LeaseWithTime)
+ delete(mq.m, item.id)
+ return item
+}
+
+// Poll returns the lease closest to expiry without removing it from the heap.
+func (mq *ExpiredNotifier) Poll() *LeaseWithTime {
+ if mq.Len() == 0 {
+ return nil
+ }
+ return mq.queue[0]
+}
+
+func (mq *ExpiredNotifier) Len() int {
+ return len(mq.m)
+}
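+
+// Usage sketch (hypothetical values): how the lessor drives the notifier. The lease
+// closest to expiry always sits at the top of the min-heap.
+//
+//  n := newLeaseExpiredNotifier()
+//  n.Init()
+//  n.RegisterOrUpdate(&LeaseWithTime{id: 1, time: time.Now().Add(5 * time.Second)})
+//  n.RegisterOrUpdate(&LeaseWithTime{id: 2, time: time.Now().Add(1 * time.Second)})
+//  next := n.Poll()   // lease 2, since it expires first
+//  _ = n.Unregister() // pop it once it has been revoked
+//  _ = next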
diff --git a/etcd/lease/over_lessor.go b/etcd/lease/over_lessor.go
new file mode 100644
index 00000000000..53f7fc7fda9
--- /dev/null
+++ b/etcd/lease/over_lessor.go
@@ -0,0 +1,866 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "container/heap"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/ls-2018/etcd_cn/etcd/lease/leasepb"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "go.uber.org/zap"
+)
+
+const (
+ NoLease = LeaseID(0) // a special LeaseID meaning no lease is attached
+ MaxLeaseTTL = 9000000000
+)
+
+var v3_6 = semver.Version{Major: 3, Minor: 6}
+
+var (
+ forever = time.Time{}
+ leaseRevokeRate = 1000 // maximum number of leases revoked per second; configurable for tests
+ leaseCheckpointRate = 1000 // maximum number of lease checkpoints written to the consensus log per second; configurable for tests
+ defaultLeaseCheckpointInterval = 5 * time.Minute // default interval between lease checkpoints
+ maxLeaseCheckpointBatchSize = 1000 // maximum number of lease checkpoints batched into a single consensus log entry
+ defaultExpiredleaseRetryInterval = 3 * time.Second // default interval for rechecking whether an expired lease has been revoked
+ ErrNotPrimary = errors.New("not a primary lessor")
+ ErrLeaseNotFound = errors.New("lease not found")
+ ErrLeaseExists = errors.New("lease already exists")
+ ErrLeaseTTLTooLarge = errors.New("too large lease TTL")
+)
+
+type TxnDelete interface {
+ DeleteRange(key, end []byte) (n, rev int64)
+ End()
+}
+
+type RangeDeleter func() TxnDelete
+
+// Checkpointer persists the remaining TTL of leases as checkpoints in the consensus log. It is defined here to avoid a circular dependency with mvcc.
+type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest)
+
+type LeaseID int64
+
+// Lessor grants, revokes, renews and checkpoints leases.
+type Lessor interface {
+ // SetRangeDeleter lets the lessor create TxnDeletes to the store.
+ // Lessor deletes the items in the revoked or expired lease by creating
+ // new TxnDeletes.
+ SetRangeDeleter(rd RangeDeleter)
+ SetCheckpointer(cp Checkpointer)
+ Grant(id LeaseID, ttl int64) (*Lease, error) // create a lease with the given TTL
+ Revoke(id LeaseID) error // remove a lease
+ Checkpoint(id LeaseID, remainingTTL int64) error // replicate the remaining TTL of a lease to the other members
+ Attach(id LeaseID, items []LeaseItem) error //
+ GetLease(item LeaseItem) LeaseID // return the LeaseID of the given item, or NoLease if none is attached
+ Detach(id LeaseID, items []LeaseItem) error // detach the given keys from a lease
+ Promote(extend time.Duration) // promote this lessor to primary; the primary manages expiry and renewal, and on promotion extends all lease TTLs beyond their previous values
+ Demote() // triggered when leadership moves away from this member
+ Renew(id LeaseID) (int64, error) // recompute the expiry time of a lease
+ Lookup(id LeaseID) *Lease
+ Leases() []*Lease // all leases held by this node
+ ExpiredLeasesC() <-chan []*Lease // a channel delivering batches of expired leases
+ Recover(b backend.Backend, rd RangeDeleter)
+ Stop()
+}
+
+type lessor struct {
+ mu sync.RWMutex
+ demotec chan struct{} // set while this lessor is primary; closed on demotion
+ leaseMap map[LeaseID]*Lease // all known leases
+ leaseExpiredNotifier *ExpiredNotifier // tracks lease expiry
+ leaseCheckpointHeap LeaseQueue // schedules checkpoints per lease ID
+ itemMap map[LeaseItem]LeaseID // which lease each key is attached to
+ rd RangeDeleter // used to range-delete keys when a lease expires
+ cp Checkpointer // persists a lease's remaining TTL so it survives leader elections and restarts
+ b backend.Backend // persists leases to bolt.db
+ minLeaseTTL int64 // minimum grantable lease TTL; any shorter request is stretched to this minimum
+ expiredC chan []*Lease // delivers batches of expired leases
+ // stopC is a channel whose closure indicates that the lessor should be stopped.
+ stopC chan struct{}
+ doneC chan struct{} // closed once the lessor has stopped
+ lg *zap.Logger
+ checkpointInterval time.Duration // interval between lease checkpoints
+ expiredLeaseRetryInterval time.Duration // interval for rechecking whether an expired lease has been revoked
+ checkpointPersist bool // whether the lessor always persists the remaining TTL (always enabled in v3.6)
+ cluster cluster // lessor behavior is adjusted based on the cluster version
+}
+type Lease struct {
+ ID LeaseID // lease ID, allocated monotonically
+ ttl int64 // time to live of the lease in seconds
+ remainingTTL int64 // remaining time to live in seconds; zero means unset and the full ttl should be used
+ expiryMu sync.RWMutex // protects concurrent access to expiry
+ expiry time.Time // time the lease expires; expiry.IsZero() means the lease lives forever
+ mu sync.RWMutex // protects concurrent access to itemSet
+ itemSet map[LeaseItem]struct{} // keys attached to this lease
+ revokec chan struct{} // closed when the lease is revoked or expires, triggering follow-up logic
+}
+
+type cluster interface {
+ Version() *semver.Version // the minimum major.minor version across the whole cluster
+}
+
+type LessorConfig struct {
+ MinLeaseTTL int64 // minimum grantable lease TTL; any shorter request is stretched to this minimum
+ CheckpointInterval time.Duration // interval between lease checkpoints
+ ExpiredLeasesRetryInterval time.Duration // interval for rechecking whether an expired lease has been revoked
+ CheckpointPersist bool // whether the lessor always persists the remaining TTL (always enabled in v3.6)
+}
+
+func NewLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) Lessor {
+ return newLessor(lg, b, cluster, cfg)
+}
+
+func (le *lessor) shouldPersistCheckpoints() bool {
+ cv := le.cluster.Version()
+ return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, v3_6))
+}
+
+func greaterOrEqual(first, second semver.Version) bool {
+ return !first.LessThan(second)
+}
+
+// Promote is called when this node becomes the raft leader.
+func (le *lessor) Promote(extend time.Duration) {
+ // extend is the election timeout
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.demotec = make(chan struct{})
+
+ // refresh the expiry time of every lease
+ for _, l := range le.leaseMap {
+ l.refresh(extend)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item) // start tracking this lease's expiry
+ le.scheduleCheckpointIfNeeded(l)
+ }
+
+ if len(le.leaseMap) < leaseRevokeRate {
+ // no chance of a revocation pile-up
+ return
+ }
+
+ // adjust expiry times if too many of them would land in the same window
+ leases := le.unsafeLeases()
+ sort.Sort(leasesByExpiry(leases))
+
+ baseWindow := leases[0].Remaining() // remaining time of the soonest-expiring lease
+ nextWindow := baseWindow + time.Second
+ expires := 0 // leases expiring in the current window
+ // keep the expiry rate below the total revoke rate so piled-up leases never consume the whole revoke limit
+ targetExpiresPerSecond := (3 * leaseRevokeRate) / 4
+ for _, l := range leases {
+ remaining := l.Remaining()
+ if remaining > nextWindow {
+ baseWindow = remaining
+ nextWindow = baseWindow + time.Second
+ expires = 1
+ continue
+ }
+ expires++
+ if expires <= targetExpiresPerSecond {
+ continue
+ }
+ rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond))
+ // if leases are extended by n seconds, a lease expiring n seconds ahead of the base window should be extended by only one second
+ rateDelay -= float64(remaining - baseWindow)
+ delay := time.Duration(rateDelay)
+ nextWindow = baseWindow + delay
+ l.refresh(delay + extend)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
+ }
+}
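+
+// Worked example (hypothetical numbers): with leaseRevokeRate = 1000 the target is
+// 750 expirations per second. If 2000 refreshed leases would all expire inside the
+// same one-second window after a leader change, the 751st and later leases receive
+// an extra delay of roughly 1s*(expires/750) minus (remaining - baseWindow), which
+// spreads them over later windows so revocation stays within the rate limit.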
+
+type leasesByExpiry []*Lease
+
+func (le leasesByExpiry) Len() int { return len(le) }
+func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
+func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
+
+func (le *lessor) GetLease(item LeaseItem) LeaseID {
+ le.mu.RLock()
+ id := le.itemMap[item] // a missing entry yields NoLease (the key has no lease attached)
+ le.mu.RUnlock()
+ return id
+}
+
+func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.b = b
+ le.rd = rd
+ le.leaseMap = make(map[LeaseID]*Lease)
+ le.itemMap = make(map[LeaseItem]LeaseID)
+ le.initAndRecover()
+}
+
+func (le *lessor) Stop() {
+ close(le.stopC)
+ <-le.doneC
+}
+
+// --------------------------------------------- OVER -----------------------------------------------------------------
+
+// FakeLessor is a fake implementation of Lessor interface.
+// Used for testing only.
+type FakeLessor struct{}
+
+func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}
+
+func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {}
+
+func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil }
+
+func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
+
+func (fl *FakeLessor) Checkpoint(id LeaseID, remainingTTL int64) error { return nil }
+
+func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 }
+
+func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) Promote(extend time.Duration) {}
+
+func (fl *FakeLessor) Demote() {}
+
+func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }
+
+func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil }
+
+func (fl *FakeLessor) Leases() []*Lease { return nil }
+
+func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil }
+
+func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {}
+
+func (fl *FakeLessor) Stop() {}
+
+type FakeTxnDelete struct {
+ backend.BatchTx
+}
+
+func (ftd *FakeTxnDelete) DeleteRange(key, end []byte) (n, rev int64) { return 0, 0 }
+func (ftd *FakeTxnDelete) End() { ftd.Unlock() }
+
+// --------------------------------------------- OVER -----------------------------------------------------------------
+
+// Grant creates a lease with the given ID and TTL.
+func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
+ if id == NoLease {
+ return nil, ErrLeaseNotFound
+ }
+
+ if ttl > MaxLeaseTTL {
+ return nil, ErrLeaseTTLTooLarge
+ }
+
+ // when the lessor is under heavy load it should grant longer TTLs to reduce renew traffic
+ l := &Lease{
+ ID: id,
+ ttl: ttl,
+ itemSet: make(map[LeaseItem]struct{}),
+ revokec: make(chan struct{}), // closed when the lease is revoked or expires, triggering follow-up logic
+ }
+
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ if _, ok := le.leaseMap[id]; ok {
+ return nil, ErrLeaseExists
+ }
+
+ if l.ttl < le.minLeaseTTL {
+ l.ttl = le.minLeaseTTL
+ }
+
+ if le.isPrimary() { // only the primary lessor tracks real expiry
+ l.refresh(0) // refresh the lease's expiry time
+ } else {
+ l.forever()
+ }
+
+ le.leaseMap[id] = l
+ l.persistTo(le.b)
+
+ if le.isPrimary() {
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
+ }
+
+ return l, nil
+}
+
+// expireExists reports whether an expired lease exists; next indicates one may show up on the next call.
+func (le *lessor) expireExists() (l *Lease, ok bool, next bool) {
+ if le.leaseExpiredNotifier.Len() == 0 {
+ return nil, false, false
+ }
+
+ item := le.leaseExpiredNotifier.Poll() // peek at the first item without removing it from the heap
+ l = le.leaseMap[item.id]
+ if l == nil {
+ // the lease has already been revoked or removed; nothing more to do
+ le.leaseExpiredNotifier.Unregister() // O(log N) pop of the first item
+ return nil, false, true
+ }
+ now := time.Now()
+ if now.Before(item.time) {
+ // the lease has not expired yet
+ return l, false, false
+ }
+
+ // recheck if revoke is complete after retry interval
+ item.time = now.Add(le.expiredLeaseRetryInterval)
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ return l, true, false
+}
+
+// findExpiredLeases pops expired leases off the leaseExpiredNotifier min-heap, bounded by the given limit.
+func (le *lessor) findExpiredLeases(limit int) []*Lease {
+ leases := make([]*Lease, 0, 16)
+
+ for {
+ l, ok, next := le.expireExists() // fetch one expired lease plus whether more may appear later
+ if !ok && !next {
+ // nothing expired now and nothing expected later
+ break
+ }
+ if !ok {
+ // nothing expired at the moment
+ continue
+ }
+ if next {
+ // more may exist on a later attempt
+ continue
+ }
+ //
+ if l.expired() {
+ leases = append(leases, l)
+ if len(leases) == limit {
+ break
+ }
+ }
+ }
+
+ return leases
+}
+
+// revokeExpiredLeases finds all expired leases and sends them to the expired channel to await revocation.
+func (le *lessor) revokeExpiredLeases() {
+ var ls []*Lease
+
+ // leaseRevokeRate is a per-second cap; this loop runs every 500ms, so use half of it
+ revokeLimit := leaseRevokeRate / 2
+
+ le.mu.RLock()
+ if le.isPrimary() { // only the primary lessor revokes
+ // pop expired leases from the leaseExpiredNotifier min-heap, bounded by revokeLimit
+ ls = le.findExpiredLeases(revokeLimit)
+ }
+ le.mu.RUnlock()
+
+ if len(ls) != 0 {
+ select {
+ case <-le.stopC:
+ return
+ case le.expiredC <- ls:
+ default:
+ // the expiredC receiver may be busy with something else; try again in 500ms
+ }
+ }
+}
+
+// ExpiredLeasesC returns the channel that delivers batches of expired leases.
+func (le *lessor) ExpiredLeasesC() <-chan []*Lease {
+ return le.expiredC
+}
+
+// Revoke deletes the lease, and the keys attached to it, from the kvindex and bolt.db.
+func (le *lessor) Revoke(id LeaseID) error {
+ le.mu.Lock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ le.mu.Unlock()
+ return ErrLeaseNotFound
+ }
+ defer close(l.revokec)
+ le.mu.Unlock()
+ // le.rd is installed by mvcc.newWatchableStore; if it is unset there are no keys to delete
+ if le.rd == nil {
+ return nil
+ }
+
+ txn := le.rd()
+
+ // sort the keys so they are deleted in the same order on every member; otherwise the backend hashes would diverge
+ keys := l.Keys() // keys currently attached to this lease
+ sort.StringSlice(keys).Sort()
+ for _, key := range keys {
+ fmt.Printf("lease %d expired, deleting key %s \n", id, key)
+ txn.DeleteRange([]byte(key), nil) // delete from the in-memory kvindex
+ }
+
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ delete(le.leaseMap, l.ID)
+ // the lease deletion must happen in the same backend transaction as the kv deletion; otherwise, if etcdserver fails in between, we could end up revoking the lease without deleting the keys, or vice versa
+ le.b.BatchTx().UnsafeDelete(buckets.Lease, int64ToBytes(int64(l.ID))) // delete the lease record from bolt.db
+ txn.End()
+ return nil
+}
+
+// Remaining returns the time left until the lease expires.
+func (l *Lease) Remaining() time.Duration {
+ l.expiryMu.RLock()
+ defer l.expiryMu.RUnlock()
+ if l.expiry.IsZero() {
+ return time.Duration(math.MaxInt64)
+ }
+ return time.Until(l.expiry)
+}
+
+type LeaseItem struct {
+ Key string
+}
+
+func int64ToBytes(n int64) []byte {
+ bytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(bytes, uint64(n))
+ return bytes
+}
+
+// expired reports whether the lease has expired.
+func (l *Lease) expired() bool {
+ return l.Remaining() <= 0
+}
+
+// persistTo writes the lease record to the backend (bolt.db).
+func (l *Lease) persistTo(b backend.Backend) {
+ key := int64ToBytes(int64(l.ID))
+
+ lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL}
+ val, err := lpb.Marshal()
+ if err != nil {
+ panic("序列化lease消息失败")
+ }
+
+ b.BatchTx().Lock()
+ b.BatchTx().UnsafePut(buckets.Lease, key, val)
+ b.BatchTx().Unlock()
+}
+
+func (l *Lease) TTL() int64 {
+ return l.ttl
+}
+
+// Keys returns the keys currently attached to this lease.
+func (l *Lease) Keys() []string {
+ l.mu.RLock()
+ keys := make([]string, 0, len(l.itemSet))
+ for k := range l.itemSet {
+ keys = append(keys, k.Key)
+ }
+ l.mu.RUnlock()
+ return keys
+}
+
+// getRemainingTTL returns the last checkpointed remaining TTL of the lease.
+func (l *Lease) getRemainingTTL() int64 {
+ if l.remainingTTL > 0 {
+ return l.remainingTTL
+ }
+ return l.ttl
+}
+
+// newLessor creates the lease manager and starts its run loop.
+func newLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) *lessor {
+ checkpointInterval := cfg.CheckpointInterval
+ expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval
+ if checkpointInterval == 0 {
+ checkpointInterval = defaultLeaseCheckpointInterval
+ }
+ if expiredLeaseRetryInterval == 0 {
+ expiredLeaseRetryInterval = defaultExpiredleaseRetryInterval
+ }
+ l := &lessor{
+ leaseMap: make(map[LeaseID]*Lease),
+ itemMap: make(map[LeaseItem]LeaseID),
+ leaseExpiredNotifier: newLeaseExpiredNotifier(), // queue driving expiry-based removal
+ leaseCheckpointHeap: make(LeaseQueue, 0),
+ b: b, // bolt.db
+ minLeaseTTL: cfg.MinLeaseTTL, // minimum grantable TTL; shorter requests are stretched to it
+ checkpointInterval: checkpointInterval, // interval between lease checkpoints
+ expiredLeaseRetryInterval: expiredLeaseRetryInterval, // interval for rechecking whether an expired lease has been revoked
+ checkpointPersist: cfg.CheckpointPersist, // whether the remaining TTL is always persisted (always on in v3.6)
+ expiredC: make(chan []*Lease, 16), // buffered to avoid unnecessary blocking
+ stopC: make(chan struct{}),
+ doneC: make(chan struct{}),
+ lg: lg,
+ cluster: cluster,
+ }
+ l.initAndRecover() // restore lease state from bolt.db
+
+ go l.runLoop() // start scanning for leases that are about to expire
+
+ return l
+}
+
+// isPrimary reports whether this lessor is the primary. The primary lessor manages lease expiry and renewal.
+// In etcd, the raft leader is the primary. Two primaries may coexist briefly (raft allows concurrent leaders with different terms) for at most one election timeout.
+// A stale primary cannot affect correctness, since its proposals carry a smaller term and will not be committed.
+// TODO: raft followers do not forward lease-management proposals. There may be a very small window (usually within one second, depending on scheduling) between the raft leader being demoted and the lessor being demoted.
+// Normally this should not be a problem; leases are not that time-sensitive.
+func (le *lessor) isPrimary() bool {
+ return le.demotec != nil
+}
+
+// SetRangeDeleter installs the function used to obtain a write transaction for deletes.
+func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ le.rd = rd
+}
+
+// Renew recomputes the expiry time of a lease that is still within its TTL.
+func (le *lessor) Renew(id LeaseID) (int64, error) {
+ le.mu.RLock()
+ if !le.isPrimary() {
+ le.mu.RUnlock()
+ return -1, ErrNotPrimary
+ }
+
+ demotec := le.demotec
+
+ l := le.leaseMap[id]
+ if l == nil {
+ le.mu.RUnlock()
+ return -1, ErrLeaseNotFound
+ }
+ // whether the checkpointed remaining TTL should be cleared on renew
+ clearRemainingTTL := le.cp != nil && l.remainingTTL > 0
+
+ le.mu.RUnlock()
+ if l.expired() { // the lease has already expired
+ select {
+ case <-l.revokec: // closed once the lease has been revoked
+ return -1, ErrLeaseNotFound
+ case <-demotec:
+ return -1, ErrNotPrimary
+ case <-le.stopC:
+ return -1, ErrNotPrimary
+ }
+ }
+
+ // Clear remaining TTL when we renew if it is set
+ // By applying a RAFT entry only when the remainingTTL is already set, we limit the number
+ // of RAFT entries written per lease to a max of 2 per checkpoint interval.
+ if clearRemainingTTL {
+ // periodically batch-sync the remaining TTL of leases to followers through the raft log;
+ // followers update the remaining TTL in their in-memory leaseMap when they apply the checkpoint request
+ le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), RemainingTtl: 0}}})
+ }
+
+ le.mu.Lock()
+ l.refresh(0)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.mu.Unlock()
+
+ return l.ttl, nil
+}
+
+// Lookup returns the lease with the given ID, or nil if it does not exist.
+func (le *lessor) Lookup(id LeaseID) *Lease {
+ le.mu.RLock()
+ defer le.mu.RUnlock()
+ return le.leaseMap[id]
+}
+
+// Leases returns all leases on this node, sorted by expiry.
+func (le *lessor) Leases() []*Lease {
+ le.mu.RLock()
+ ls := le.unsafeLeases()
+ le.mu.RUnlock()
+ sort.Sort(leasesByExpiry(ls))
+ return ls
+}
+
+// Attach attaches the given keys to a lease.
+func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ return ErrLeaseNotFound
+ }
+
+ l.mu.Lock()
+ for _, it := range items {
+ l.itemSet[it] = struct{}{}
+ le.itemMap[it] = id
+ }
+ l.mu.Unlock()
+ return nil
+}
+
+// Detach removes the given keys from a lease.
+func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ return ErrLeaseNotFound
+ }
+
+ l.mu.Lock()
+ for _, it := range items {
+ delete(l.itemSet, it)
+ delete(le.itemMap, it)
+ }
+ l.mu.Unlock()
+ return nil
+}
+
+// refresh resets the lease expiry to now plus the remaining TTL (plus extend).
+func (l *Lease) refresh(extend time.Duration) {
+ newExpiry := time.Now().Add(extend + time.Duration(l.getRemainingTTL())*time.Second)
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = newExpiry
+}
+
+// Demote is triggered when this member is no longer the leader.
+func (le *lessor) Demote() {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ // mark every lease as never expiring
+ for _, l := range le.leaseMap {
+ l.forever() // in memory only
+ }
+ // drop all scheduled lease checkpoints
+ le.clearScheduledLeasesCheckpoints()
+ // reset the lease-expiry notifier
+ le.clearLeaseExpiredNotifier()
+
+ if le.demotec != nil {
+ close(le.demotec)
+ le.demotec = nil
+ }
+}
+
+// forever marks the lease as never expiring; used while this lessor is not primary.
+func (l *Lease) forever() {
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = forever
+}
+
+// unsafeLeases returns all leases; the caller must hold le.mu.
+func (le *lessor) unsafeLeases() []*Lease {
+ leases := make([]*Lease, 0, len(le.leaseMap))
+ for _, l := range le.leaseMap {
+ leases = append(leases, l)
+ }
+ return leases
+}
+
+// clearScheduledLeasesCheckpoints drops all scheduled lease checkpoints.
+func (le *lessor) clearScheduledLeasesCheckpoints() {
+ le.leaseCheckpointHeap = make(LeaseQueue, 0)
+}
+
+// clearLeaseExpiredNotifier resets the lease-expiry notifier.
+func (le *lessor) clearLeaseExpiredNotifier() {
+ le.leaseExpiredNotifier = newLeaseExpiredNotifier()
+}
+
+// initAndRecover restores lease state from bolt.db.
+func (le *lessor) initAndRecover() {
+ tx := le.b.BatchTx()
+ tx.Lock()
+
+ tx.UnsafeCreateBucket(buckets.Lease)
+ _, vs := tx.UnsafeRange(buckets.Lease, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0)
+ for i := range vs {
+ var lpb leasepb.Lease
+ err := lpb.Unmarshal(vs[i])
+ if err != nil {
+ tx.Unlock()
+ panic("反序列化lease 消息失败")
+ }
+ ID := LeaseID(lpb.ID)
+ if lpb.TTL < le.minLeaseTTL {
+ lpb.TTL = le.minLeaseTTL
+ }
+ le.leaseMap[ID] = &Lease{
+ ID: ID,
+ ttl: lpb.TTL,
+ // itemSet is rebuilt when the key-value pairs are recovered; expiry is set to forever here and refreshed on promotion
+ itemSet: make(map[LeaseItem]struct{}),
+ expiry: forever,
+ revokec: make(chan struct{}),
+ remainingTTL: lpb.RemainingTTL,
+ }
+ }
+ le.leaseExpiredNotifier.Init() // rebuild the id->item index (mq.m)
+ heap.Init(&le.leaseCheckpointHeap)
+ tx.Unlock()
+
+ le.b.ForceCommit()
+}
+
+func (le *lessor) SetCheckpointer(cp Checkpointer) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ le.cp = cp
+}
+
+// runLoop periodically revokes expired leases and schedules lease checkpoints.
+func (le *lessor) runLoop() {
+ defer close(le.doneC)
+ for {
+ // find all expired leases and send them to the expired channel to await revocation
+ le.revokeExpiredLeases()
+ // find all due scheduled checkpoints and submit them so the remaining TTLs are persisted to the consensus log
+ le.checkpointScheduledLeases()
+
+ select {
+ case <-time.After(500 * time.Millisecond):
+ case <-le.stopC:
+ return
+ }
+ }
+}
+
+// checkpointScheduledLeases finds all due scheduled lease checkpoints and submits them to the checkpointer so they are persisted to the consensus log.
+func (le *lessor) checkpointScheduledLeases() {
+ var cps []*pb.LeaseCheckpoint
+
+ // rate limit
+ for i := 0; i < leaseCheckpointRate/2; i++ {
+ le.mu.Lock()
+ if le.isPrimary() {
+ cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize)
+ }
+ le.mu.Unlock()
+
+ if len(cps) != 0 {
+ // periodically batch-sync the remaining TTL of leases to followers through the raft log;
+ // followers update the remaining TTL in their in-memory leaseMap when they apply the checkpoint request
+ // (the checkpointer ultimately goes through srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp}),
+ // handled by the `case r.LeaseCheckpoint != nil` branch of Apply)
+ le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps})
+ }
+ if len(cps) < maxLeaseCheckpointBatchSize {
+ return
+ }
+ }
+}
+
+// scheduleCheckpointIfNeeded schedules a checkpoint so the lease TTL is not reset across leader changes.
+// Called when a lease is granted, after becoming leader, and after applying a checkpoint consensus message.
+func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) {
+ if le.cp == nil {
+ return
+ }
+ // only schedule a checkpoint when the remaining TTL exceeds checkpointInterval
+ le.checkpointInterval = time.Second * 20 // hard-coded in this fork, overriding the configured interval
+ if lease.getRemainingTTL() > int64(le.checkpointInterval.Seconds()) {
+ if le.lg != nil {
+ le.lg.Info("开始调度 租约 检查", zap.Int64("leaseID", int64(lease.ID)), zap.Duration("intervalSeconds", le.checkpointInterval))
+ }
+ heap.Push(&le.leaseCheckpointHeap, &LeaseWithTime{
+ id: lease.ID,
+ time: time.Now().Add(le.checkpointInterval), // 300 秒后租约到期, 检查这个租约
+ })
+ le.lg.Info("租约", zap.Int("checkpoint", len(le.leaseCheckpointHeap)), zap.Int("lease", len(le.leaseMap)))
+ }
+}
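+
+// Worked example (hypothetical numbers): with checkpointInterval = 20s, a lease
+// granted with TTL = 300s is pushed onto leaseCheckpointHeap with a due time 20s
+// from now. When it comes due, findDueScheduledCheckpoints emits a
+// LeaseCheckpoint{ID, RemainingTtl: ~280} that the primary replicates through the
+// checkpointer, so a new leader resumes the lease with roughly 280s left instead
+// of resetting it to the full 300s.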
+
+// findDueScheduledCheckpoints collects checkpoints whose scheduled time has passed, up to checkpointLimit.
+func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCheckpoint {
+ if le.cp == nil {
+ return nil
+ }
+
+ now := time.Now()
+ var cps []*pb.LeaseCheckpoint
+ for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit {
+ lt := le.leaseCheckpointHeap[0]
+ if lt.time.After(now) { // the earliest scheduled checkpoint is still in the future; stop
+ return cps
+ }
+ heap.Pop(&le.leaseCheckpointHeap)
+ var l *Lease
+ var ok bool
+ if l, ok = le.leaseMap[lt.id]; !ok {
+ continue
+ }
+ if !now.Before(l.expiry) {
+ continue
+ }
+ remainingTTL := int64(math.Ceil(l.expiry.Sub(now).Seconds())) // remaining time in seconds
+ if remainingTTL >= l.ttl {
+ continue
+ }
+ if le.lg != nil {
+ le.lg.Debug("检查租约ing", zap.Int64("leaseID", int64(lt.id)), zap.Int64("remainingTTL", remainingTTL))
+ }
+ cps = append(cps, &pb.LeaseCheckpoint{ID: int64(lt.id), RemainingTtl: remainingTTL})
+ }
+ return cps
+}
+
+// Checkpoint records the remaining TTL of a lease when a checkpoint entry is applied.
+func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ if l, ok := le.leaseMap[id]; ok {
+ // when checkpointing we only update the remaining TTL; Promote is responsible for applying it to the lease expiry
+ l.remainingTTL = remainingTTL
+ if le.shouldPersistCheckpoints() { // always true when checkpointPersist is set or the cluster is >= v3.6
+ l.persistTo(le.b)
+ }
+ if le.isPrimary() {
+ // schedule the next checkpoint as needed
+ le.scheduleCheckpointIfNeeded(l)
+ }
+ }
+ return nil
+}
diff --git a/etcd/main.go b/etcd/main.go
new file mode 100644
index 00000000000..dd85e316440
--- /dev/null
+++ b/etcd/main.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package main is a simple wrapper of the real etcd entrypoint package
+// (located at github.com/ls-2018/etcd_cn/etcdmain) to ensure that etcd is still
+// "go getable"; e.g. `go get go.etcd.io/etcd` works as expected and
+// builds a binary in $GOBIN/etcd
+//
+// This package should NOT be extended or modified in any way; to modify the
+// etcd binary, work in the `github.com/ls-2018/etcd_cn/etcdmain` package.
+//
+package main
+
+import (
+ "os"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdmain"
+)
+
+func main() {
+ etcdmain.Main(os.Args)
+}
diff --git a/etcd/mvcc/backend/backend_bolt.go b/etcd/mvcc/backend/backend_bolt.go
new file mode 100644
index 00000000000..2accec71929
--- /dev/null
+++ b/etcd/mvcc/backend/backend_bolt.go
@@ -0,0 +1,580 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+var (
+ defaultBatchLimit = 10000
+ defaultBatchInterval = 100 * time.Millisecond
+
+ defragLimit = 10000
+
+ // initialMmapSize is the initial size of the mmapped region. Setting this larger than
+ // the potential max db size can prevent writer from blocking reader.
+ // This only works for linux.
+ initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+
+ // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
+ minSnapshotWarningTimeout = 30 * time.Second
+)
+
+type Backend interface {
+ ReadTx() ReadTx // returns a read transaction; on the main data path it is superseded by ConcurrentReadTx
+ BatchTx() BatchTx // opens a write (batch) transaction
+ ConcurrentReadTx() ReadTx // the concurrent read transaction used on the main read path
+ Snapshot() Snapshot // takes a snapshot of the db
+ Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error)
+ Size() int64 // physical disk size of the DB; space may be preallocated, so this is not the actual data size
+ SizeInUse() int64 // disk space actually in use
+ OpenReadTxN() int64 // number of currently open read transactions
+ Defrag() error // defragments the data file, reclaiming space held by deleted keys and stale revisions
+ ForceCommit() // forces the current batch tx to commit
+ Close() error
+}
+
+type Snapshot interface {
+ Size() int64 // size of the snapshot
+ WriteTo(w io.Writer) (n int64, err error) // stream the snapshot data
+ Close() error // close the snapshot
+}
+
+type txReadBufferCache struct {
+ mu sync.Mutex
+ buf *txReadBuffer
+ bufVersion uint64
+}
+
+type (
+ MyBackend = backend
+ backend struct {
+ size int64 // disk size currently allocated
+ sizeInUse int64 // disk size actually in use
+ commits int64 // number of committed transactions
+ openReadTxN int64 // number of currently open read transactions
+ mlock bool // mlock prevents backend database file to be swapped
+ boltdbMu sync.RWMutex // guards the db handle below; normal bolt.DB transactions only need the read lock, but defrag takes the write lock for isolation
+ db *bolt.DB // the underlying store is boltDB
+ batchInterval time.Duration // batch-commit interval, 100ms by default
+ batchLimit int // maximum number of writes per batch, 10000 by default
+ batchTx *batchTxBuffered // serves write requests
+ readTx *readTx // serves read requests
+ // txReadBufferCache mirrors "txReadBuffer" within "readTx" -- readTx.baseReadTx.buf.
+ // When creating "concurrentReadTx":
+ // - if the cache is up-to-date, "readTx.baseReadTx.buf" copy can be skipped
+ // - if the cache is empty or outdated, "readTx.baseReadTx.buf" copy is required
+ txReadBufferCache txReadBufferCache
+ stopc chan struct{}
+ donec chan struct{}
+ hooks Hooks
+ lg *zap.Logger
+ }
+)
+
+type BackendConfig struct {
+ Path string // file path to the backend file
+ BatchInterval time.Duration // maximum time before flushing the BatchTx
+ BatchLimit int // maximum number of puts before flushing the BatchTx
+ BackendFreelistType bolt.FreelistType // freelist type of the backing boltdb
+ MmapSize uint64 // number of bytes to mmap for the backend
+ Logger *zap.Logger //
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"` // disables all uses of fsync
+ Mlock bool // prevents the backend database file from being swapped out
+ Hooks Hooks // executed during the lifecycle of backend transactions
+}
+
+func DefaultBackendConfig() BackendConfig {
+ return BackendConfig{
+ BatchInterval: defaultBatchInterval,
+ BatchLimit: defaultBatchLimit,
+ MmapSize: initialMmapSize,
+ }
+}
+
+func New(bcfg BackendConfig) Backend {
+ return newBackend(bcfg)
+}
+
+func NewDefaultBackend(path string) Backend {
+ bcfg := DefaultBackendConfig()
+ bcfg.Path = path
+ return newBackend(bcfg)
+}
+
+func newBackend(bcfg BackendConfig) *backend {
+ if bcfg.Logger == nil {
+ bcfg.Logger = zap.NewNop()
+ }
+
+ bopts := &bolt.Options{}
+ if boltOpenOptions != nil {
+ *bopts = *boltOpenOptions
+ }
+ bopts.InitialMmapSize = bcfg.mmapSize()
+ bopts.FreelistType = bcfg.BackendFreelistType
+ bopts.NoSync = bcfg.UnsafeNoFsync
+ bopts.NoGrowSync = bcfg.UnsafeNoFsync
+ bopts.Mlock = bcfg.Mlock
+
+ db, err := bolt.Open(bcfg.Path, 0o600, bopts)
+ if err != nil {
+ bcfg.Logger.Panic("打开数据库失败", zap.String("path", bcfg.Path), zap.Error(err))
+ }
+
+ b := &backend{
+ db: db,
+
+ batchInterval: bcfg.BatchInterval,
+ batchLimit: bcfg.BatchLimit,
+ mlock: bcfg.Mlock,
+
+ readTx: &readTx{
+ baseReadTx: baseReadTx{
+ buf: txReadBuffer{
+ txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)},
+ bufVersion: 0,
+ },
+ buckets: make(map[BucketID]*bolt.Bucket),
+ txWg: new(sync.WaitGroup),
+ txMu: new(sync.RWMutex),
+ },
+ },
+ txReadBufferCache: txReadBufferCache{
+ mu: sync.Mutex{},
+ bufVersion: 0,
+ buf: nil,
+ },
+
+ stopc: make(chan struct{}),
+ donec: make(chan struct{}),
+
+ lg: bcfg.Logger,
+ }
+
+ b.batchTx = newBatchTxBuffered(b)
+ b.hooks = bcfg.Hooks
+
+ go b.run()
+ return b
+}
+
+// BatchTx returns the current batch tx. The tx can be used for both read and write operations.
+// Writes can be retrieved immediately within the same tx.
+// Writes are isolated from other txs until the current one is committed.
+func (b *backend) BatchTx() BatchTx {
+ return b.batchTx
+}
+
+func (b *backend) ReadTx() ReadTx { return b.readTx }
+
+// ConcurrentReadTx creates and returns a new ReadTx, which:
+// A) creates and keeps a copy of backend.readTx.txReadBuffer,
+// B) references the boltdb read Tx (and its bucket cache) of the current batch interval.
+func (b *backend) ConcurrentReadTx() ReadTx {
+	// only the read lock is needed here, to read readTx's buffer; it is held very briefly
+	b.readTx.RLock()
+	defer b.readTx.RUnlock()
+	b.readTx.txWg.Add(1)
+
+	b.txReadBufferCache.mu.Lock()
+
+	curCache := b.txReadBufferCache.buf           // the currently cached buffer copy
+	curCacheVer := b.txReadBufferCache.bufVersion // version of the cached copy
+	curBufVer := b.readTx.buf.bufVersion          // current version of the read buffer
+
+	isEmptyCache := curCache == nil
+	isStaleCache := curCacheVer != curBufVer // whether the cached copy is stale
+
+	var buf *txReadBuffer
+	switch {
+	case isEmptyCache: // the cache is empty
+		// perform a safe buffer copy while holding b.txReadBufferCache.mu; this should only run once, so the overhead is small
+		curBuf := b.readTx.buf.unsafeCopy()
+		buf = &curBuf
+	case isStaleCache:
+		// to maximize concurrency, try an unsafe buffer copy with the lock released while copying;
+		// the cache may be overwritten by someone else in the meantime, so the readTx buffer version is re-checked below
+		b.txReadBufferCache.mu.Unlock()
+		curBuf := b.readTx.buf.unsafeCopy()
+		b.txReadBufferCache.mu.Lock()
+		buf = &curBuf
+	default:
+		// the cache is neither empty nor stale; just use it
+		buf = curCache
+	}
+	if isEmptyCache || curCacheVer == b.txReadBufferCache.bufVersion {
+		b.txReadBufferCache.buf = buf
+		b.txReadBufferCache.bufVersion = curBufVer
+	}
+
+	b.txReadBufferCache.mu.Unlock()
+	// concurrentReadTx is not supposed to write to its txReadBuffer
+	return &concurrentReadTx{
+		baseReadTx: baseReadTx{
+			buf:     *buf, // a copy of backend.readTx.buf, so reads no longer need readTx.mu to protect the buffer; this trades memory (and copying) for less lock contention
+ txMu: b.readTx.txMu,
+ tx: b.readTx.tx,
+ buckets: b.readTx.buckets,
+ txWg: b.readTx.txWg,
+ },
+ }
+}
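+
+// Note (annotation): the version checks above keep the cache coherent. readTx.buf.bufVersion is
+// bumped by the write path on every writeback, so a cached copy is reusable only while its version
+// matches; the second comparison against b.txReadBufferCache.bufVersion covers the window in which
+// the mutex was released to copy the buffer, so a copy refreshed concurrently by another caller is
+// not overwritten with an older one.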
+
+// ForceCommit forces the current batch tx to commit.
+func (b *backend) ForceCommit() {
+ b.batchTx.Commit()
+}
+
+// Snapshot takes a snapshot of the bolt.db and returns a Snapshot handle.
+func (b *backend) Snapshot() Snapshot {
+ b.batchTx.Commit()
+
+ b.boltdbMu.RLock()
+ defer b.boltdbMu.RUnlock()
+	tx, err := b.db.Begin(false) // begin a read-only transaction
+	if err != nil {
+		b.lg.Fatal("开启读事务失败", zap.Error(err))
+	}
+
+	stopc, donec := make(chan struct{}), make(chan struct{})
+	dbBytes := tx.Size() // the current database size in bytes as seen by this transaction
+	go func() {
+		defer close(donec)
+		// sendRateBytes assumes snapshot data is sent over a 1 Gbps connection, i.e. a minimum TCP throughput of 100MB/s
+		var sendRateBytes int64 = 100 * 1024 * 1024
+ warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+ if warningTimeout < minSnapshotWarningTimeout {
+ warningTimeout = minSnapshotWarningTimeout
+ }
+ start := time.Now()
+ ticker := time.NewTicker(warningTimeout)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ b.lg.Warn("快照传输时间过长", zap.Duration("taking", time.Since(start)),
+ zap.Int64("bytes", dbBytes),
+ zap.String("size", humanize.Bytes(uint64(dbBytes))),
+ )
+ case <-stopc:
+ return
+ }
+ }
+ }()
+
+ return &snapshot{tx, stopc, donec}
+}
+
+func (b *backend) Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error) {
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ b.boltdbMu.RLock()
+ defer b.boltdbMu.RUnlock()
+ err := b.db.View(func(tx *bolt.Tx) error {
+ c := tx.Cursor()
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("获取桶的hash失败 %s", string(next))
+ }
+ h.Write(next)
+ b.ForEach(func(k, v []byte) error {
+ if ignores != nil && !ignores(next, k) {
+ fmt.Println(string(k), string(v))
+ h.Write(k)
+ h.Write(v)
+ }
+ return nil
+ })
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ return h.Sum32(), nil
+}
+
+func (b *backend) Size() int64 {
+ return atomic.LoadInt64(&b.size)
+}
+
+func (b *backend) SizeInUse() int64 {
+ return atomic.LoadInt64(&b.sizeInUse)
+}
+
+// run periodically commits the pending bolt transaction.
+func (b *backend) run() {
+	defer close(b.donec)
+	t := time.NewTimer(b.batchInterval) // commit the pending transaction on every batch interval (100ms by default)
+	defer t.Stop()
+	for {
+		select {
+		case <-t.C:
+		case <-b.stopc:
+			b.batchTx.CommitAndStop()
+			return
+		}
+		if b.batchTx.safePending() != 0 {
+			b.batchTx.Commit()
+		}
+		t.Reset(b.batchInterval) // re-arm the timer
+ }
+}
+
+func (b *backend) Close() error {
+ close(b.stopc)
+ <-b.donec
+ return b.db.Close()
+}
+
+// Commits returns total number of commits since start
+func (b *backend) Commits() int64 {
+ return atomic.LoadInt64(&b.commits)
+}
+
+// Defrag defragments the backend database file.
+func (b *backend) Defrag() error {
+	return b.defrag()
+}
+
+// defrag rewrites the database into a temporary file and swaps it in, reclaiming free pages.
+func (b *backend) defrag() error {
+ now := time.Now()
+
+	// Lock batchTx to ensure nobody is using the previous tx, then close the previous ongoing tx.
+	b.batchTx.Lock()
+	defer b.batchTx.Unlock()
+
+	// Lock the database after locking the tx to avoid deadlock.
+	b.boltdbMu.Lock()
+	defer b.boltdbMu.Unlock()
+
+	// Block concurrent read requests while resetting the read tx.
+ b.readTx.Lock()
+ defer b.readTx.Unlock()
+
+ b.batchTx.unsafeCommit(true)
+
+ b.batchTx.tx = nil
+
+ // Create a temporary file to ensure we start with a clean slate.
+ // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
+ dir := filepath.Dir(b.db.Path())
+ temp, err := ioutil.TempFile(dir, "db.tmp.*")
+ if err != nil {
+ return err
+ }
+ options := bolt.Options{}
+ if boltOpenOptions != nil {
+ options = *boltOpenOptions
+ }
+ options.OpenFile = func(_ string, _ int, _ os.FileMode) (file *os.File, err error) {
+ return temp, nil
+ }
+	// do not load (mlock) the tmp db into memory, regardless of the open options
+ options.Mlock = false
+ tdbp := temp.Name()
+ tmpdb, err := bolt.Open(tdbp, 0o600, &options)
+ if err != nil {
+ return err
+ }
+
+ dbp := b.db.Path()
+ size1, sizeInUse1 := b.Size(), b.SizeInUse()
+ if b.lg != nil {
+ b.lg.Info(
+ "內存碎片清理中",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes", size1),
+ zap.String("current-db-size", humanize.Bytes(uint64(size1))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse1),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
+ )
+ }
+ err = defragdb(b.db, tmpdb, defragLimit)
+ if err != nil {
+ tmpdb.Close()
+ if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
+ b.lg.Error("在碎片整理完成后未能删除db.tmp", zap.Error(rmErr))
+ }
+ return err
+ }
+
+ err = b.db.Close()
+ if err != nil {
+ b.lg.Fatal("关闭数据库失败", zap.Error(err))
+ }
+ err = tmpdb.Close()
+ if err != nil {
+ b.lg.Fatal("关闭tmp数据库失败", zap.Error(err))
+ }
+ err = os.Rename(tdbp, dbp)
+ if err != nil {
+ b.lg.Fatal("重命名tmp数据库失败", zap.Error(err))
+ }
+
+ defragmentedBoltOptions := bolt.Options{}
+ if boltOpenOptions != nil {
+ defragmentedBoltOptions = *boltOpenOptions
+ }
+ defragmentedBoltOptions.Mlock = b.mlock
+
+ b.db, err = bolt.Open(dbp, 0o600, &defragmentedBoltOptions)
+ if err != nil {
+ b.lg.Fatal("打开数据库失败", zap.String("path", dbp), zap.Error(err))
+ }
+ b.batchTx.tx = b.unsafeBegin(true)
+
+ b.readTx.reset()
+ b.readTx.tx = b.unsafeBegin(false)
+
+ size := b.readTx.tx.Size()
+ db := b.readTx.tx.DB()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+ took := time.Since(now)
+
+ size2, sizeInUse2 := b.Size(), b.SizeInUse()
+ if b.lg != nil {
+ b.lg.Info(
+ "完成了目录碎片整理工作",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes-diff", size2-size1),
+ zap.Int64("current-db-size-bytes", size2),
+ zap.String("current-db-size", humanize.Bytes(uint64(size2))),
+ zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse2),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))),
+ zap.Duration("took", took),
+ )
+ }
+ return nil
+}
+
+func defragdb(odb, tmpdb *bolt.DB, limit int) error {
+ // open a tx on tmpdb for writes
+ tmptx, err := tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ tmptx.Rollback()
+ }
+ }()
+
+ // open a tx on old db for read
+ tx, err := odb.Begin(false)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ c := tx.Cursor()
+
+ count := 0
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
+ }
+
+ tmpb, berr := tmptx.CreateBucketIfNotExists(next)
+ if berr != nil {
+ return berr
+ }
+ tmpb.FillPercent = 0.9 // for bucket2seq write in for each
+
+ if err = b.ForEach(func(k, v []byte) error {
+ count++
+ if count > limit {
+ err = tmptx.Commit()
+ if err != nil {
+ return err
+ }
+ tmptx, err = tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ tmpb = tmptx.Bucket(next)
+ tmpb.FillPercent = 0.9 // for bucket2seq write in for each
+
+ count = 0
+ }
+ return tmpb.Put(k, v)
+ }); err != nil {
+ return err
+ }
+ }
+
+ return tmptx.Commit()
+}
+
+func (b *backend) begin(write bool) *bolt.Tx {
+ b.boltdbMu.RLock()
+ tx := b.unsafeBegin(write)
+ b.boltdbMu.RUnlock()
+
+	size := tx.Size() // the current database size in bytes as seen by this transaction, e.g. 24576
+	db := tx.DB()
+	stats := db.Stats()
+	atomic.StoreInt64(&b.size, size)
+	atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize))) // e.g. 24576 - 2*4096
+	atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN))                                  // number of currently open read transactions
+
+ return tx
+}
+
+// unsafeBegin starts a boltdb transaction; write selects a read-write or read-only transaction.
+func (b *backend) unsafeBegin(write bool) *bolt.Tx {
+ tx, err := b.db.Begin(write)
+ if err != nil {
+ b.lg.Fatal("开启事务失败", zap.Error(err))
+ }
+ return tx
+}
+
+func (b *backend) OpenReadTxN() int64 {
+ return atomic.LoadInt64(&b.openReadTxN)
+}
+
+type snapshot struct {
+ *bolt.Tx
+ stopc chan struct{}
+ donec chan struct{}
+}
+
+func (s *snapshot) Close() error {
+ close(s.stopc)
+ <-s.donec
+ return s.Tx.Rollback()
+}
diff --git a/etcd/mvcc/backend/bolt_batch_tx.go b/etcd/mvcc/backend/bolt_batch_tx.go
new file mode 100644
index 00000000000..b3375929c90
--- /dev/null
+++ b/etcd/mvcc/backend/bolt_batch_tx.go
@@ -0,0 +1,226 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "math"
+ "sync"
+ "sync/atomic"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+type BucketID int
+
+type Bucket interface {
+	ID() BucketID // ID returns a unique identifier of the bucket. The ID must not be persisted and can be used as a lightweight in-memory identifier.
+	Name() []byte
+	String() string
+	// IsSafeRangeBucket is a way to avoid inadvertently reading duplicate keys;
+	// overwrites on a bucket should only fetch with limit=1, but a safe-range bucket is known to never overwrite any key, so ranging over it is safe.
+	IsSafeRangeBucket() bool // do not use unsafeRange on non-key buckets
+}
+
+// BatchTx is the write (batch) transaction; it embeds ReadTx so that buffered writes can be read back within the same tx.
+type BatchTx interface {
+ ReadTx
+ UnsafeCreateBucket(bucket Bucket)
+ UnsafeDeleteBucket(bucket Bucket)
+ UnsafePut(bucket Bucket, key []byte, value []byte)
+ UnsafeSeqPut(bucket Bucket, key []byte, value []byte)
+ UnsafeDelete(bucket Bucket, key []byte)
+ Commit() // Commit commits a previous tx and begins a new writable one.
+ CommitAndStop() // CommitAndStop commits the previous tx and does not create a new one.
+}
+
+type batchTx struct {
+ sync.Mutex
+ tx *bolt.Tx
+ backend *backend
+	pending int // number of writes pending in the current transaction
+}
+
+func (t *batchTx) Lock() {
+ t.Mutex.Lock()
+}
+
+func (t *batchTx) Unlock() {
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ t.Mutex.Unlock()
+}
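+
+// Usage sketch (annotation): every group of unsafe writes is bracketed by Lock/Unlock, and Unlock
+// commits automatically once pending reaches backend.batchLimit, so callers never commit explicitly
+// on the hot path:
+//
+//	t.Lock()
+//	t.UnsafePut(bucket, k, v)
+//	t.Unlock() // commits here only if t.pending >= t.backend.batchLimit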
+
+func (t *batchTx) RLock() {
+ panic("unexpected RLock")
+}
+
+func (t *batchTx) RUnlock() {
+ panic("unexpected RUnlock")
+}
+
+func (t *batchTx) UnsafeCreateBucket(bucket Bucket) {
+ _, err := t.tx.CreateBucket(bucket.Name())
+ if err != nil && err != bolt.ErrBucketExists {
+ t.backend.lg.Fatal("创建bucket", zap.Stringer("bucket-name", bucket), zap.Error(err))
+ }
+ t.pending++
+}
+
+func (t *batchTx) UnsafePut(bucket Bucket, key []byte, value []byte) {
+ t.unsafePut(bucket, key, value, false)
+}
+
+// UnsafeSeqPut is like UnsafePut, but hints that keys are inserted in sequential (append-only) order.
+func (t *batchTx) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) {
+	t.unsafePut(bucket, key, value, true)
+}
+
+// unsafePut writes the key/value into the given bucket; the caller must hold the lock.
+func (t *batchTx) unsafePut(bucketType Bucket, key []byte, value []byte, seq bool) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal("找不到bolt.db里的桶", zap.Stringer("bucket-name", bucketType), zap.Stack("stack"))
+ }
+ if seq {
+		// Raising the fill percent is useful when the workload is mostly append-only: it delays page splits and reduces space usage.
+		// It tells bolt to do a 90-10 split instead of a 50-50 split when a page is full, which suits sequential inserts and keeps the file smaller.
+		// For example: with FillPercent = 0.9 a db went from 103MB to 64MB for 22MB of actual data.
+ bucket.FillPercent = 0.9
+ }
+ if err := bucket.Put(key, value); err != nil {
+ t.backend.lg.Fatal(
+ "桶写数据失败", zap.Stringer("bucket-name", bucketType), zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// UnsafeRange ranges over the bucket; the caller must hold the lock.
+func (t *batchTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal("无法找到bucket", zap.Stringer("bucket-name", bucketType), zap.Stack("stack"))
+ }
+ return unsafeRange(bucket.Cursor(), key, endKey, limit)
+}
+
+// unsafeRange looks up key/value pairs from bolt.db using the cursor.
+func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ var isMatch func(b []byte) bool
+ if len(endKey) > 0 {
+		// match keys strictly smaller than endKey
+ isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
+ } else {
+ isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
+ limit = 1
+ }
+
+ for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
+ vs = append(vs, cv)
+ keys = append(keys, ck)
+ if limit == int64(len(keys)) {
+ break
+ }
+ }
+ return keys, vs
+}
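+
+// Example (annotation): unsafeRange(c, []byte("a"), []byte("c"), 0) returns every key in ["a", "c"),
+// since a non-positive limit is treated as unlimited, while unsafeRange(c, []byte("a"), nil, 3)
+// ignores the limit and returns at most the single exact match for "a".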
+
+// UnsafeDelete deletes the key from the bucket; the caller must hold the lock.
+func (t *batchTx) UnsafeDelete(bucketType Bucket, key []byte) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal(
+ "查找桶失败",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Stack("stack"),
+ )
+ }
+ err := bucket.Delete(key)
+ if err != nil {
+ t.backend.lg.Fatal(
+ "删除一个key失败",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// Commit commits a previous tx and begins a new writable one.
+func (t *batchTx) Commit() {
+ t.Lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+// CommitAndStop commits the previous tx and does not create a new one.
+func (t *batchTx) CommitAndStop() {
+ t.Lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTx) safePending() int {
+ t.Mutex.Lock()
+ defer t.Mutex.Unlock()
+ return t.pending
+}
+
+func (t *batchTx) commit(stop bool) {
+	// commit the pending transaction, if any
+ if t.tx != nil {
+ if t.pending == 0 && !stop {
+ return
+ }
+ err := t.tx.Commit() // bolt.Commit
+ atomic.AddInt64(&t.backend.commits, 1)
+
+ t.pending = 0
+ if err != nil {
+ t.backend.lg.Fatal("提交事务失败", zap.Error(err))
+ }
+ }
+ if !stop {
+ t.tx = t.backend.begin(true)
+ }
+}
+
+// -------------------------------------------- OVER -------------------------------------------------------------
+
+// UnsafeForEach iterates over the bucket; the caller must hold the lock.
+func (t *batchTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ return unsafeForEach(t.tx, bucket, visitor)
+}
+
+func unsafeForEach(tx *bolt.Tx, bucket Bucket, visitor func(k, v []byte) error) error {
+ if b := tx.Bucket(bucket.Name()); b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// UnsafeDeleteBucket deletes the bucket.
+func (t *batchTx) UnsafeDeleteBucket(bucket Bucket) {
+ err := t.tx.DeleteBucket(bucket.Name())
+ if err != nil && err != bolt.ErrBucketNotFound {
+ t.backend.lg.Fatal("删除桶失败", zap.Stringer("bucket-name", bucket), zap.Error(err))
+ }
+ t.pending++
+}
diff --git a/etcd/mvcc/backend/bolt_batch_tx_buffered.go b/etcd/mvcc/backend/bolt_batch_tx_buffered.go
new file mode 100644
index 00000000000..496db8b53b3
--- /dev/null
+++ b/etcd/mvcc/backend/bolt_batch_tx_buffered.go
@@ -0,0 +1,104 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "sync"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+type batchTxBuffered struct {
+ batchTx
+ buf txWriteBuffer
+}
+
+func newBatchTxBuffered(backend *backend) *batchTxBuffered {
+ tx := &batchTxBuffered{
+ batchTx: batchTx{backend: backend},
+ buf: txWriteBuffer{
+ txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)},
+ bucket2seq: make(map[BucketID]bool),
+ },
+ }
+ tx.Commit()
+ return tx
+}
+
+func (t *batchTxBuffered) Unlock() {
+ if t.pending != 0 {
+ t.backend.readTx.Lock() // blocks txReadBuffer for writing.
+ t.buf.writeback(&t.backend.readTx.buf)
+ t.backend.readTx.Unlock()
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ }
+ t.batchTx.Unlock()
+}
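+
+// Note (annotation): Unlock is where buffered writes become visible to readers. The pending writes
+// collected in t.buf are written back into backend.readTx.buf under readTx's write lock, so a
+// subsequent ReadTx/ConcurrentReadTx can see them even before the boltdb transaction commits.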
+
+func (t *batchTxBuffered) Commit() {
+ t.Lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) CommitAndStop() {
+ t.Lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) commit(stop bool) {
+ if t.backend.hooks != nil {
+ t.backend.hooks.OnPreCommitUnsafe(t)
+ }
+
+	// all read txs must be closed to acquire the boltdb commit rwlock
+ t.backend.readTx.Lock()
+ t.unsafeCommit(stop)
+ t.backend.readTx.Unlock()
+}
+
+func (t *batchTxBuffered) unsafeCommit(stop bool) {
+ if t.backend.readTx.tx != nil {
+		// wait for all store read transactions using the current boltdb tx to finish, then close the boltdb tx
+ go func(tx *bolt.Tx, wg *sync.WaitGroup) {
+ wg.Wait()
+ if err := tx.Rollback(); err != nil {
+ t.backend.lg.Fatal("回滚tx失败", zap.Error(err))
+ }
+ }(t.backend.readTx.tx, t.backend.readTx.txWg)
+ t.backend.readTx.reset()
+ }
+
+ t.batchTx.commit(stop)
+
+ if !stop {
+ t.backend.readTx.tx = t.backend.begin(false)
+ }
+}
+
+func (t *batchTxBuffered) UnsafePut(bucket Bucket, key []byte, value []byte) {
+ t.batchTx.UnsafePut(bucket, key, value)
+ t.buf.put(bucket, key, value)
+}
+
+// UnsafeSeqPut writes to boltdb and mirrors the write into the write buffer.
+func (t *batchTxBuffered) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) {
+ t.batchTx.UnsafeSeqPut(bucket, key, value)
+ t.buf.putSeq(bucket, key, value)
+}
diff --git a/server/storage/backend/config_default.go b/etcd/mvcc/backend/config_default.go
similarity index 96%
rename from server/storage/backend/config_default.go
rename to etcd/mvcc/backend/config_default.go
index fd57c7ca84c..847bd10fd78 100644
--- a/server/storage/backend/config_default.go
+++ b/etcd/mvcc/backend/config_default.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build !linux && !windows
+// +build !linux,!windows
package backend
diff --git a/server/storage/backend/config_linux.go b/etcd/mvcc/backend/config_linux.go
similarity index 100%
rename from server/storage/backend/config_linux.go
rename to etcd/mvcc/backend/config_linux.go
diff --git a/server/storage/backend/config_windows.go b/etcd/mvcc/backend/config_windows.go
similarity index 98%
rename from server/storage/backend/config_windows.go
rename to etcd/mvcc/backend/config_windows.go
index 7bb42f3a289..ba6e5a1284c 100644
--- a/server/storage/backend/config_windows.go
+++ b/etcd/mvcc/backend/config_windows.go
@@ -13,6 +13,7 @@
// limitations under the License.
//go:build windows
+// +build windows
package backend
diff --git a/server/storage/backend/doc.go b/etcd/mvcc/backend/doc.go
similarity index 100%
rename from server/storage/backend/doc.go
rename to etcd/mvcc/backend/doc.go
diff --git a/etcd/mvcc/backend/hooks.go b/etcd/mvcc/backend/hooks.go
new file mode 100644
index 00000000000..5d3695cc604
--- /dev/null
+++ b/etcd/mvcc/backend/hooks.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+type HookFunc func(tx BatchTx)
+
+// Hooks allow additional logic to be executed during the lifecycle of backend transactions.
+type Hooks interface {
+	OnPreCommitUnsafe(tx BatchTx) // hook executed before the transaction is committed
+}
+
+type hooks struct {
+ onPreCommitUnsafe HookFunc
+}
+
+func (h hooks) OnPreCommitUnsafe(tx BatchTx) {
+ h.onPreCommitUnsafe(tx)
+}
+
+func NewHooks(onPreCommitUnsafe HookFunc) Hooks {
+ return hooks{onPreCommitUnsafe: onPreCommitUnsafe}
+}
diff --git a/etcd/mvcc/backend/read_tx.go b/etcd/mvcc/backend/read_tx.go
new file mode 100644
index 00000000000..e39011de674
--- /dev/null
+++ b/etcd/mvcc/backend/read_tx.go
@@ -0,0 +1,155 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "math"
+ "sync"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
+// overwrites on a bucket should only fetch with limit=1, but IsSafeRangeBucket
+// is known to never overwrite any key so range is safe.
+
+// ReadTx is the interface for read transactions.
+type ReadTx interface {
+ Lock()
+ Unlock()
+ RLock()
+ RUnlock()
+ UnsafeRange(bucket Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
+	UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error // iterates over all k,v pairs of the given bucket
+}
+
+// baseReadTx is accessed concurrently, so a read-write lock is needed to protect it.
+type baseReadTx struct {
+	// The write transaction acquires this lock for writing when it ends, to write its updates back into baseReadTx's buffer;
+	// creating a concurrentReadTx acquires it for reading, because the buffer has to be copied.
+	mu      sync.RWMutex              // protects access to txReadBuffer
+	buf     txReadBuffer              // cache of bolt.db records used to speed up reads
+	txMu    *sync.RWMutex             // protects tx and buckets below
+	tx      *bolt.Tx                  // the underlying boltdb read transaction of the current batch interval
+	buckets map[BucketID]*bolt.Bucket // references to the underlying bolt.db buckets
+	txWg    *sync.WaitGroup           // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done.
+}
+
+func (baseReadTx *baseReadTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ dups := make(map[string]struct{})
+ getDups := func(k, v []byte) error {
+ dups[string(k)] = struct{}{}
+ return nil
+ }
+ visitNoDup := func(k, v []byte) error {
+ if _, ok := dups[string(k)]; ok {
+ return nil
+ }
+ return visitor(k, v)
+ }
+ if err := baseReadTx.buf.ForEach(bucket, getDups); err != nil {
+ return err
+ }
+ baseReadTx.txMu.Lock()
+ err := unsafeForEach(baseReadTx.tx, bucket, visitNoDup)
+ baseReadTx.txMu.Unlock()
+ if err != nil {
+ return err
+ }
+ return baseReadTx.buf.ForEach(bucket, visitor)
+}
+
+// UnsafeRange looks up key/value pairs, first in the read buffer and then in bolt.db.
+func (baseReadTx *baseReadTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if endKey == nil || len(endKey) == 0 {
+ // forbid duplicates for single keys
+ limit = 1
+ }
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ if limit > 1 && !bucketType.IsSafeRangeBucket() {
+ panic("不要在非keys桶上使用unsafeRange")
+ }
+	// first query the key/value pairs from the read buffer
+	keys, vals := baseReadTx.buf.Range(bucketType, key, endKey, limit)
+	// if the buffer already satisfies the limit, return the buffered result directly
+ if int64(len(keys)) == limit {
+ return keys, vals
+ }
+
+	// look up (or cache) the bucket reference
+	bn := bucketType.ID() // e.g. the key bucket has ID 1
+ baseReadTx.txMu.RLock()
+ bucket, ok := baseReadTx.buckets[bn]
+ baseReadTx.txMu.RUnlock()
+ lockHeld := false
+ if !ok {
+ baseReadTx.txMu.Lock()
+ lockHeld = true
+		bucket = baseReadTx.tx.Bucket(bucketType.Name()) // fetch the bucket from the boltdb tx and cache the reference
+ baseReadTx.buckets[bn] = bucket
+ }
+
+	// ignore missing buckets, since they may have been created within this batch
+	if bucket == nil { // low-probability: the bucket was created in this batch and is not yet visible to this read tx
+ if lockHeld {
+ baseReadTx.txMu.Unlock()
+ }
+ return keys, vals
+ }
+ if !lockHeld {
+ baseReadTx.txMu.Lock()
+ }
+ c := bucket.Cursor()
+ baseReadTx.txMu.Unlock()
+	// merge the results from the buffer with the results from boltdb, then return
+	k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys))) // fetch from bolt.db whatever was not already found in the buffer
+ return append(k2, keys...), append(v2, vals...)
+}
+
+// readTx serves read requests within a batch interval.
+type readTx struct {
+ baseReadTx
+}
+
+func (rt *readTx) Lock() { rt.mu.Lock() }
+func (rt *readTx) Unlock() { rt.mu.Unlock() }
+func (rt *readTx) RLock() { rt.mu.RLock() }
+func (rt *readTx) RUnlock() { rt.mu.RUnlock() }
+
+func (rt *readTx) reset() {
+ rt.buf.reset()
+ rt.buckets = make(map[BucketID]*bolt.Bucket)
+ rt.tx = nil
+ rt.txWg = new(sync.WaitGroup)
+}
+
+type concurrentReadTx struct {
+ baseReadTx
+}
+
+func (rt *concurrentReadTx) Lock() {}
+func (rt *concurrentReadTx) Unlock() {}
+
+// RLock is no-op. concurrentReadTx does not need to be locked after it is created.
+func (rt *concurrentReadTx) RLock() {}
+
+// RUnlock signals the end of concurrentReadTx.
+func (rt *concurrentReadTx) RUnlock() {
+ rt.txWg.Done()
+}
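+
+// Note (annotation): the wait group ties the lifetime of the shared boltdb read tx to every
+// concurrentReadTx created from it: ConcurrentReadTx calls txWg.Add(1), RUnlock calls txWg.Done(),
+// and unsafeCommit only rolls the old boltdb tx back (in a goroutine) once txWg.Wait() returns,
+// so in-flight reads are never left on a closed transaction.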
diff --git a/server/storage/backend/testing/betesting.go b/etcd/mvcc/backend/testing/betesting.go
similarity index 84%
rename from server/storage/backend/testing/betesting.go
rename to etcd/mvcc/backend/testing/betesting.go
index e42908f9365..cde78b290bd 100644
--- a/server/storage/backend/testing/betesting.go
+++ b/etcd/mvcc/backend/testing/betesting.go
@@ -15,19 +15,18 @@
package betesting
import (
- "os"
+ "io/ioutil"
"path/filepath"
"testing"
"time"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
-
- "go.etcd.io/etcd/server/v3/storage/backend"
)
func NewTmpBackendFromCfg(t testing.TB, bcfg backend.BackendConfig) (backend.Backend, string) {
- dir, err := os.MkdirTemp(t.TempDir(), "etcd_backend_test")
+ dir, err := ioutil.TempDir(t.TempDir(), "etcd_backend_test")
if err != nil {
panic(err)
}
@@ -39,13 +38,13 @@ func NewTmpBackendFromCfg(t testing.TB, bcfg backend.BackendConfig) (backend.Bac
// NewTmpBackend creates a backend implementation for testing.
func NewTmpBackend(t testing.TB, batchInterval time.Duration, batchLimit int) (backend.Backend, string) {
- bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t))
+ bcfg := backend.DefaultBackendConfig()
bcfg.BatchInterval, bcfg.BatchLimit = batchInterval, batchLimit
return NewTmpBackendFromCfg(t, bcfg)
}
func NewDefaultTmpBackend(t testing.TB) (backend.Backend, string) {
- return NewTmpBackendFromCfg(t, backend.DefaultBackendConfig(zaptest.NewLogger(t)))
+ return NewTmpBackendFromCfg(t, backend.DefaultBackendConfig())
}
func Close(t testing.TB, b backend.Backend) {
diff --git a/server/storage/backend/tx_buffer.go b/etcd/mvcc/backend/tx_buffer.go
similarity index 81%
rename from server/storage/backend/tx_buffer.go
rename to etcd/mvcc/backend/tx_buffer.go
index 779255b7320..47d587d6699 100644
--- a/server/storage/backend/tx_buffer.go
+++ b/etcd/mvcc/backend/tx_buffer.go
@@ -16,12 +16,13 @@ package backend
import (
"bytes"
+ "fmt"
"sort"
)
const bucketBufferInitialSize = 512
-// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
+// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
type txBuffer struct {
buckets map[BucketID]*bucketBuffer
}
@@ -49,20 +50,6 @@ func (txw *txWriteBuffer) put(bucket Bucket, k, v []byte) {
txw.putInternal(bucket, k, v)
}
-func (txw *txWriteBuffer) putSeq(bucket Bucket, k, v []byte) {
- // TODO: Add (in tests?) verification whether k>b[len(b)]
- txw.putInternal(bucket, k, v)
-}
-
-func (txw *txWriteBuffer) putInternal(bucket Bucket, k, v []byte) {
- b, ok := txw.buckets[bucket.ID()]
- if !ok {
- b = newBucketBuffer()
- txw.buckets[bucket.ID()] = b
- }
- b.add(k, v)
-}
-
func (txw *txWriteBuffer) reset() {
txw.txBuffer.reset()
for k := range txw.bucket2seq {
@@ -94,18 +81,9 @@ func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
txr.bufVersion++
}
-// txReadBuffer accesses buffered updates.
type txReadBuffer struct {
txBuffer
- // bufVersion is used to check if the buffer is modified recently
- bufVersion uint64
-}
-
-func (txr *txReadBuffer) Range(bucket Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- if b := txr.buckets[bucket.ID()]; b != nil {
- return b.Range(key, endKey, limit)
- }
- return nil, nil
+	bufVersion uint64 // bufVersion is used to check whether the buffer was modified recently
}
func (txr *txReadBuffer) ForEach(bucket Bucket, visitor func(k, v []byte) error) error {
@@ -115,65 +93,20 @@ func (txr *txReadBuffer) ForEach(bucket Bucket, visitor func(k, v []byte) error)
return nil
}
-// unsafeCopy returns a copy of txReadBuffer, caller should acquire backend.readTx.RLock()
-func (txr *txReadBuffer) unsafeCopy() txReadBuffer {
- txrCopy := txReadBuffer{
- txBuffer: txBuffer{
- buckets: make(map[BucketID]*bucketBuffer, len(txr.txBuffer.buckets)),
- },
- bufVersion: 0,
- }
- for bucketName, bucket := range txr.txBuffer.buckets {
- txrCopy.txBuffer.buckets[bucketName] = bucket.Copy()
- }
- return txrCopy
-}
-
type kv struct {
key []byte
- val []byte
+ val string
}
-// bucketBuffer buffers key-value pairs that are pending commit.
+// bucketBuffer buffers key-value pairs that are pending commit.
type bucketBuffer struct {
- buf []kv
- // used tracks number of elements in use so buf can be reused without reallocation.
- used int
-}
-
-func newBucketBuffer() *bucketBuffer {
- return &bucketBuffer{buf: make([]kv, bucketBufferInitialSize), used: 0}
-}
-
-func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
- f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
- idx := sort.Search(bb.used, f)
- if idx < 0 || idx >= bb.used {
- return nil, nil
- }
- if len(endKey) == 0 {
- if bytes.Equal(key, bb.buf[idx].key) {
- keys = append(keys, bb.buf[idx].key)
- vals = append(vals, bb.buf[idx].val)
- }
- return keys, vals
- }
- if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
- return nil, nil
- }
- for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
- if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
- break
- }
- keys = append(keys, bb.buf[i].key)
- vals = append(vals, bb.buf[i].val)
- }
- return keys, vals
+	buf  []kv
+	used int // used tracks the number of elements in use so buf can be reused without reallocation.
}
func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
for i := 0; i < bb.used; i++ {
- if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
+ if err := visitor(bb.buf[i].key, []byte(bb.buf[i].val)); err != nil {
return err
}
}
@@ -181,7 +114,7 @@ func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
}
func (bb *bucketBuffer) add(k, v []byte) {
- bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
+ bb.buf[bb.used].key, bb.buf[bb.used].val = k, string(v)
bb.used++
if bb.used == len(bb.buf) {
buf := make([]kv, (3*len(bb.buf))/2)
@@ -193,7 +126,7 @@ func (bb *bucketBuffer) add(k, v []byte) {
// merge merges data from bbsrc into bb.
func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
for i := 0; i < bbsrc.used; i++ {
- bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
+ bb.add(bbsrc.buf[i].key, []byte(bbsrc.buf[i].val))
}
if bb.used == bbsrc.used {
return
@@ -229,3 +162,76 @@ func (bb *bucketBuffer) Copy() *bucketBuffer {
copy(bbCopy.buf, bb.buf)
return &bbCopy
}
+
+// unsafeCopy returns a copy of txReadBuffer; the caller should hold backend.readTx.RLock().
+func (txr *txReadBuffer) unsafeCopy() txReadBuffer {
+ txrCopy := txReadBuffer{
+ txBuffer: txBuffer{
+ buckets: make(map[BucketID]*bucketBuffer, len(txr.txBuffer.buckets)),
+ },
+ bufVersion: 0,
+ }
+ for bucketName, bucket := range txr.txBuffer.buckets {
+ txrCopy.txBuffer.buckets[bucketName] = bucket.Copy()
+ }
+ return txrCopy
+}
+
+func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+ f := func(i int) bool {
+ return bytes.Compare(bb.buf[i].key, key) >= 0
+ }
+	idx := sort.Search(bb.used, f) // index of the first element for which f returns true
+	if idx < 0 {                   // not found
+ return nil, nil
+ }
+ if len(endKey) == 0 {
+ if bytes.Equal(key, bb.buf[idx].key) {
+ keys = append(keys, bb.buf[idx].key)
+ vals = append(vals, []byte(bb.buf[idx].val))
+ }
+ fmt.Println(fmt.Sprintf("---->get %s:%s", string(bb.buf[idx].key), bb.buf[idx].val))
+ return keys, vals
+ }
+	// the buffer holds no key in the requested range,
+	// i.e. bb.buf[idx].key >= endKey
+ if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
+ return nil, nil
+ }
+ for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
+		// stop once bb.buf[i].key >= endKey
+ if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
+ break
+ }
+ keys = append(keys, bb.buf[i].key)
+ vals = append(vals, []byte(bb.buf[i].val))
+ }
+ return keys, vals
+}
+
+// Range returns buffered key/value pairs of the bucket within [key, endKey), up to limit.
+func (txr *txReadBuffer) Range(bucket Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if b := txr.buckets[bucket.ID()]; b != nil {
+ return b.Range(key, endKey, limit)
+ }
+ return nil, nil
+}
+
+// putSeq is called after k,v has been written to the db, to mirror the write into the buffer.
+func (txw *txWriteBuffer) putSeq(bucket Bucket, k, v []byte) {
+ txw.putInternal(bucket, k, v)
+}
+
+// putInternal creates the bucket buffer on demand and appends the key/value pair to it.
+func (txw *txWriteBuffer) putInternal(bucket Bucket, k, v []byte) {
+ b, ok := txw.buckets[bucket.ID()]
+ if !ok {
+ b = newBucketBuffer()
+ txw.buckets[bucket.ID()] = b
+ }
+ b.add(k, v)
+}
+
+func newBucketBuffer() *bucketBuffer { // 512
+ return &bucketBuffer{buf: make([]kv, bucketBufferInitialSize), used: 0}
+}
diff --git a/etcd/mvcc/buckets/over_bucket.go b/etcd/mvcc/buckets/over_bucket.go
new file mode 100644
index 00000000000..dfc649912c1
--- /dev/null
+++ b/etcd/mvcc/buckets/over_bucket.go
@@ -0,0 +1,62 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package buckets
+
+import (
+ "bytes"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+)
+
+var (
+ Key = backend.Bucket(bucket{id: 1, name: []byte("key"), safeRangeBucket: true})
+ Meta = backend.Bucket(bucket{id: 2, name: []byte("meta"), safeRangeBucket: false})
+ Lease = backend.Bucket(bucket{id: 3, name: []byte("lease"), safeRangeBucket: false})
+ Alarm = backend.Bucket(bucket{id: 4, name: []byte("alarm"), safeRangeBucket: false})
+ Cluster = backend.Bucket(bucket{id: 5, name: []byte("cluster"), safeRangeBucket: false})
+
+ Members = backend.Bucket(bucket{id: 10, name: []byte("members"), safeRangeBucket: false})
+ MembersRemoved = backend.Bucket(bucket{id: 11, name: []byte("members_removed"), safeRangeBucket: false})
+
+ Auth = backend.Bucket(bucket{id: 20, name: []byte("auth"), safeRangeBucket: false})
+ AuthUsers = backend.Bucket(bucket{id: 21, name: []byte("authUsers"), safeRangeBucket: false})
+ AuthRoles = backend.Bucket(bucket{id: 22, name: []byte("authRoles"), safeRangeBucket: false})
+
+ Test = backend.Bucket(bucket{id: 100, name: []byte("test"), safeRangeBucket: false})
+)
+
+type bucket struct {
+ id backend.BucketID
+ name []byte
+ safeRangeBucket bool
+}
+
+func (b bucket) ID() backend.BucketID { return b.id }
+func (b bucket) Name() []byte { return b.name }
+func (b bucket) String() string { return string(b.Name()) }
+func (b bucket) IsSafeRangeBucket() bool { return b.safeRangeBucket }
+
+var (
+ MetaConsistentIndexKeyName = []byte("consistent_index")
+ MetaTermKeyName = []byte("term")
+)
+
+// DefaultIgnores defines buckets & keys to ignore in hash checking.
+func DefaultIgnores(bucket, key []byte) bool {
+ // consistent index & term might be changed due to v2 internal sync, which
+ // is not controllable by the user.
+ return bytes.Compare(bucket, Meta.Name()) == 0 &&
+ (bytes.Compare(key, MetaTermKeyName) == 0 || bytes.Compare(key, MetaConsistentIndexKeyName) == 0)
+}
diff --git a/etcd/mvcc/index.go b/etcd/mvcc/index.go
new file mode 100644
index 00000000000..b018016bcf2
--- /dev/null
+++ b/etcd/mvcc/index.go
@@ -0,0 +1,291 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/google/btree"
+ "go.uber.org/zap"
+)
+
+type index interface {
+ Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
+ Range(key, end []byte, atRev int64) ([][]byte, []revision)
+ Revisions(key, end []byte, atRev int64, limit int) ([]revision, int)
+ CountRevisions(key, end []byte, atRev int64) int
+ Put(key []byte, rev revision)
+ Tombstone(key []byte, rev revision) error
+ RangeSince(key, end []byte, rev int64) []revision
+ Compact(rev int64) map[revision]struct{}
+ Keep(rev int64) map[revision]struct{}
+ Equal(b index) bool
+ Insert(ki *keyIndex)
+ KeyIndex(ki *keyIndex) *keyIndex
+}
+
+type treeIndex struct {
+ sync.RWMutex
+ tree *btree.BTree
+ lg *zap.Logger
+}
+
+func newTreeIndex(lg *zap.Logger) index {
+ return &treeIndex{
+ tree: btree.New(32),
+ lg: lg,
+ }
+}
+
+func (ti *treeIndex) Put(key []byte, rev revision) {
+ keyi := &keyIndex{Key: string(key)}
+
+ ti.Lock()
+ defer ti.Unlock()
+ item := ti.tree.Get(keyi)
+ if item == nil {
+ keyi.put(ti.lg, rev.Main, rev.Sub)
+ ti.tree.ReplaceOrInsert(keyi)
+ return
+ }
+ okeyi := item.(*keyIndex)
+ okeyi.put(ti.lg, rev.Main, rev.Sub)
+ marshal, _ := json.Marshal(okeyi)
+ fmt.Println(string(marshal))
+}
+
+// visit iterates over all keyIndexes in [key, end) and calls f on each of them.
+func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex) bool) {
+ keyi, endi := &keyIndex{Key: string(key)}, &keyIndex{Key: string(end)}
+
+ ti.RLock()
+ defer ti.RUnlock()
+	// Call the iterator for every keyIndex in the tree within [pivot, last], until the iterator returns false.
+	// e.g. to get everything with prefix "b", the end key is "c", since keys are ordered.
+
+ ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+ if len(endi.Key) > 0 && !item.Less(endi) {
+ return false
+ }
+ fmt.Println("keyIndex ---->:", item.(*keyIndex).Key)
+ if !f(item.(*keyIndex)) {
+ return false
+ }
+ return true
+ })
+}
+
+func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64) int {
+ if end == nil || len(end) == 0 {
+ _, _, _, err := ti.Get(key, atRev)
+ if err != nil {
+ return 0
+ }
+ return 1
+ }
+ total := 0
+ ti.visit(key, end, func(ki *keyIndex) bool {
+ if _, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ total++
+ }
+ return true
+ })
+ return total
+}
+
+func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
+ if end == nil || len(end) == 0 {
+ rev, _, _, err := ti.Get(key, atRev)
+ if err != nil {
+ return nil, nil
+ }
+ return [][]byte{key}, []revision{rev}
+ }
+ ti.visit(key, end, func(ki *keyIndex) bool {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ revs = append(revs, rev)
+ keys = append(keys, []byte(ki.Key))
+ }
+ return true
+ })
+ return keys, revs
+}
+
+func (ti *treeIndex) Tombstone(key []byte, rev revision) error {
+ keyi := &keyIndex{Key: string(key)}
+
+ ti.Lock()
+ defer ti.Unlock()
+ item := ti.tree.Get(keyi)
+ if item == nil {
+ return ErrRevisionNotFound
+ }
+
+ ki := item.(*keyIndex)
+ return ki.tombstone(ti.lg, rev.Main, rev.Sub)
+}
+
+// RangeSince returns all revisions from Key(including) to end(excluding)
+// at or after the given rev. The returned slice is sorted in the order
+// of revision.
+func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
+ keyi := &keyIndex{Key: string(key)}
+
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil || len(end) == 0 {
+ item := ti.tree.Get(keyi)
+ if item == nil {
+ return nil
+ }
+ keyi = item.(*keyIndex)
+ return keyi.since(ti.lg, rev)
+ }
+
+ endi := &keyIndex{Key: string(end)}
+ var revs []revision
+ ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+ if len(endi.Key) > 0 && !item.Less(endi) {
+ return false
+ }
+ curKeyi := item.(*keyIndex)
+ revs = append(revs, curKeyi.since(ti.lg, rev)...)
+ return true
+ })
+ sort.Sort(revisions(revs))
+
+ return revs
+}
+
+func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
+ available := make(map[revision]struct{})
+ ti.lg.Info("compact tree index", zap.Int64("revision", rev))
+ ti.Lock()
+	// clone the btree so that compaction does not affect read/write performance
+ clone := ti.tree.Clone()
+ ti.Unlock()
+
+ clone.Ascend(func(item btree.Item) bool {
+ keyi := item.(*keyIndex)
+ // Lock is needed here to prevent modification to the keyIndex while
+ // compaction is going on or revision added to empty before deletion
+ ti.Lock()
+ keyi.compact(ti.lg, rev, available)
+ if keyi.isEmpty() {
+ item := ti.tree.Delete(keyi)
+ if item == nil {
+ ti.lg.Panic("failed to delete during compaction")
+ }
+ }
+ ti.Unlock()
+ return true
+ })
+ return available
+}
+
+// Keep finds all revisions that would be kept for a compaction at the given rev.
+func (ti *treeIndex) Keep(rev int64) map[revision]struct{} {
+ available := make(map[revision]struct{})
+ ti.RLock()
+ defer ti.RUnlock()
+ ti.tree.Ascend(func(i btree.Item) bool {
+ keyi := i.(*keyIndex)
+ keyi.keep(rev, available)
+ return true
+ })
+ return available
+}
+
+func (ti *treeIndex) Equal(bi index) bool {
+ b := bi.(*treeIndex)
+
+ if ti.tree.Len() != b.tree.Len() {
+ return false
+ }
+
+ equal := true
+
+ ti.tree.Ascend(func(item btree.Item) bool {
+ aki := item.(*keyIndex)
+ bki := b.tree.Get(item).(*keyIndex)
+ if !aki.equal(bki) {
+ equal = false
+ return false
+ }
+ return true
+ })
+
+ return equal
+}
+
+// ---------------------------------------- OVER --------------------------------------------------------------
+
+// Get returns the modified revision, created revision and version of the key visible at the given atRev.
+func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
+ keyi := &keyIndex{Key: string(key)}
+ ti.RLock()
+ defer ti.RUnlock()
+	// check whether the key exists in the btree
+	if keyi = ti.keyIndex(keyi); keyi == nil {
+		return revision{}, revision{}, 0, ErrRevisionNotFound
+	}
+	return keyi.get(ti.lg, atRev) // look up the revision visible at atRev
+}
+
+// Revisions returns up to limit revisions of the keys in [key, end) visible at atRev, together with the total count.
+func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []revision, total int) {
+ if end == nil || len(end) == 0 {
+ rev, _, _, err := ti.Get(key, atRev)
+ if err != nil {
+ return nil, 0
+ }
+ return []revision{rev}, 1
+ }
+	// end is specified: visit every keyIndex in [key, end)
+ ti.visit(key, end, func(ki *keyIndex) bool {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ if limit <= 0 || len(revs) < limit {
+ revs = append(revs, rev)
+ }
+ total++
+ }
+ return true
+ })
+ return revs, total
+}
+
+func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
+ ti.RLock()
+ defer ti.RUnlock()
+ return ti.keyIndex(keyi)
+}
+
+// keyIndex reports whether the key exists in the btree and returns its keyIndex, or nil.
+func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
+ if item := ti.tree.Get(keyi); item != nil {
+ return item.(*keyIndex)
+ }
+ return nil
+}
+
+func (ti *treeIndex) Insert(ki *keyIndex) {
+ ti.Lock()
+ defer ti.Unlock()
+ ti.tree.ReplaceOrInsert(ki)
+}
diff --git a/etcd/mvcc/key_index.go b/etcd/mvcc/key_index.go
new file mode 100644
index 00000000000..bb4eec7b6af
--- /dev/null
+++ b/etcd/mvcc/key_index.go
@@ -0,0 +1,337 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/google/btree"
+ "go.uber.org/zap"
+)
+
+var ErrRevisionNotFound = errors.New("mvcc: 修订版本没有找到")
+
+// keyIndex stores the revision history of a key.
+// Deleting a key appends a tombstone revision to the
+// current generation and creates a new empty generation.
+type keyIndex struct {
+	Key         string       // the key itself
+	Modified    revision     // the latest revision at which the key was modified
+	Generations []generation // a new generation is created each time the key is (re)created; delete-then-create also starts a new one
+}
+
+// generation contains multiple revisions of a key.
+type generation struct {
+	VersionCount int64      // number of versions of the key within this generation
+	Created      revision   // revision at which the key was first created in this generation
+	Revs         []revision // revisions recorded for each modification while the key exists
+}
+
+type revision struct {
+	Main int64 // a globally increasing main revision number, incremented by every put/txn/delete transaction; all keys in one transaction share the same main revision
+	Sub  int64 // the sub revision within a transaction, starting from 0 and incremented by each put/delete inside the transaction
+}
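+
+// Example (annotation): revisions order lexicographically on (Main, Sub), so {Main: 2, Sub: 0} is
+// greater than {Main: 1, Sub: 7}. Assuming the operations put(rev 1.0), put(rev 2.0), delete(rev 3.0),
+// put(rev 4.0) on one key, its keyIndex would hold:
+//
+//	Generations[0]: Created 1.0, Revs [1.0, 2.0, 3.0(tombstone)]
+//	Generations[1]: Created 4.0, Revs [4.0]
+//	Modified: 4.0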
+
+func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) {
+ if len(ki.Generations) != 0 {
+ lg.Panic(
+ "'restore' got an unexpected non-empty Generations",
+ zap.Int("Generations-size", len(ki.Generations)),
+ )
+ }
+ ki.Modified = modified
+ g := generation{Created: created, VersionCount: ver, Revs: []revision{modified}}
+ ki.Generations = append(ki.Generations, g)
+}
+
+// tombstone puts a revision, pointing to a tombstone, to the keyIndex.
+// It also creates a new empty generation in the keyIndex.
+// It returns ErrRevisionNotFound when tombstone on an empty generation.
+func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error {
+	// if the largest revision of a keyIndex is a tombstone, the keyIndex is removed from treeIndex, otherwise memory would leak.
+ if ki.isEmpty() {
+ lg.Panic(
+ "'tombstone' got an unexpected empty keyIndex",
+ zap.String("Key", string(ki.Key)),
+ )
+ }
+ if ki.Generations[len(ki.Generations)-1].isEmpty() {
+ return ErrRevisionNotFound
+ }
+ ki.put(lg, main, sub)
+ ki.Generations = append(ki.Generations, generation{})
+ return nil
+}
+
+// since returns revisions since the given rev. Only the revision with the
+// largest Sub revision will be returned if multiple revisions have the same
+// Main revision.
+func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision {
+ if ki.isEmpty() {
+ lg.Panic("'since' 得到一个意外的空keyIndex", zap.String("Key", ki.Key))
+ }
+ since := revision{rev, 0}
+ var gi int
+ // find the Generations to start checking
+ for gi = len(ki.Generations) - 1; gi > 0; gi-- {
+ g := ki.Generations[gi]
+ if g.isEmpty() {
+ continue
+ }
+ if since.GreaterThan(g.Created) {
+ break
+ }
+ }
+
+ var revs []revision
+ var last int64
+ for ; gi < len(ki.Generations); gi++ {
+ for _, r := range ki.Generations[gi].Revs {
+ if since.GreaterThan(r) {
+ continue
+ }
+ if r.Main == last {
+ // replace the revision with a new one that has higher Sub value,
+ // because the original one should not be seen by external
+ revs[len(revs)-1] = r
+ continue
+ }
+ revs = append(revs, r)
+ last = r.Main
+ }
+ }
+ return revs
+}
+
+// compact compacts a keyIndex by removing the versions with smaller or equal
+// revision than the given atRev except the largest one (If the largest one is
+// a tombstone, it will not be kept).
+// If a generation becomes empty during compaction, it will be removed.
+func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) {
+ if ki.isEmpty() {
+ lg.Panic(
+ "'compact' got an unexpected empty keyIndex",
+ zap.String("Key", string(ki.Key)),
+ )
+ }
+
+ genIdx, revIndex := ki.doCompact(atRev, available)
+
+ g := &ki.Generations[genIdx]
+ if !g.isEmpty() {
+ // remove the previous contents.
+ if revIndex != -1 {
+ g.Revs = g.Revs[revIndex:]
+ }
+ // remove any tombstone
+ if len(g.Revs) == 1 && genIdx != len(ki.Generations)-1 {
+ delete(available, g.Revs[0])
+ genIdx++
+ }
+ }
+
+ // remove the previous Generations.
+ ki.Generations = ki.Generations[genIdx:]
+}
+
+// keep finds the revision to be kept if compact is called at given atRev.
+func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
+ if ki.isEmpty() {
+ return
+ }
+
+ genIdx, revIndex := ki.doCompact(atRev, available)
+ g := &ki.Generations[genIdx]
+ if !g.isEmpty() {
+ // remove any tombstone
+ if revIndex == len(g.Revs)-1 && genIdx != len(ki.Generations)-1 {
+ delete(available, g.Revs[revIndex])
+ }
+ }
+}
+
+func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) {
+ // walk until reaching the first revision smaller or equal to "atRev",
+ // and add the revision to the available map
+ f := func(rev revision) bool {
+ if rev.Main <= atRev {
+ available[rev] = struct{}{}
+ return false
+ }
+ return true
+ }
+
+ genIdx, g := 0, &ki.Generations[0]
+ // find first generation includes atRev or Created after atRev
+ for genIdx < len(ki.Generations)-1 {
+ if tomb := g.Revs[len(g.Revs)-1].Main; tomb > atRev {
+ break
+ }
+ genIdx++
+ g = &ki.Generations[genIdx]
+ }
+
+ revIndex = g.walk(f)
+
+ return genIdx, revIndex
+}
+
+// --------------------------------------------- OVER ---------------------------------------------------------------
+
+// get returns the last modified revision, the created revision and the version of the key
+// visible at the given atRev.
+func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) {
+	if ki.isEmpty() { // the keyIndex must hold at least one revision
+		lg.Panic("'get'得到一个意外的空keyIndex", zap.String("Key", ki.Key))
+	}
+	g := ki.findGeneration(atRev)
+	if g.isEmpty() {
+		return revision{}, revision{}, 0, ErrRevisionNotFound
+	}
+
+	n := g.walk(func(rev revision) bool { return rev.Main > atRev }) // index of the newest revision whose main revision is <= atRev
+ if n != -1 {
+ return g.Revs[n], g.Created, g.VersionCount - int64(len(g.Revs)-n-1), nil
+ }
+
+ return revision{}, revision{}, 0, ErrRevisionNotFound
+}
+
+func (ki *keyIndex) Less(b btree.Item) bool {
+ return strings.Compare(ki.Key, b.(*keyIndex).Key) == -1
+}
+
+func (ki *keyIndex) equal(b *keyIndex) bool {
+ if !strings.EqualFold(ki.Key, b.Key) {
+ return false
+ }
+ if ki.Modified != b.Modified {
+ return false
+ }
+ if len(ki.Generations) != len(b.Generations) {
+ return false
+ }
+ for i := range ki.Generations {
+ ag, bg := ki.Generations[i], b.Generations[i]
+ if !ag.equal(bg) {
+ return false
+ }
+ }
+ return true
+}
+
+func (ki *keyIndex) String() string {
+ var s string
+ for _, g := range ki.Generations {
+ s += g.String()
+ }
+ return s
+}
+
+func (g *generation) isEmpty() bool { return g == nil || len(g.Revs) == 0 }
+
+// walk walks the revisions in reverse order and returns the index of the first revision for which f returns false, or -1.
+func (g *generation) walk(f func(rev revision) bool) int {
+ l := len(g.Revs)
+ for i := range g.Revs {
+ ok := f(g.Revs[l-i-1])
+ if !ok {
+ return l - i - 1
+ }
+ }
+ return -1
+}
+
+func (g generation) equal(b generation) bool {
+ if g.VersionCount != b.VersionCount {
+ return false
+ }
+ if len(g.Revs) != len(b.Revs) {
+ return false
+ }
+
+ for i := range g.Revs {
+ ar, br := g.Revs[i], b.Revs[i]
+ if ar != br {
+ return false
+ }
+ }
+ return true
+}
+
+func (g *generation) String() string {
+ return fmt.Sprintf("g: 创建[%d] 版本数[%d], 修订记录 %#v\n", g.Created, g.VersionCount, g.Revs)
+}
+
+// isEmpty reports whether the keyIndex has only one generation and that generation holds no revisions.
+func (ki *keyIndex) isEmpty() bool {
+	return len(ki.Generations) == 1 && ki.Generations[0].isEmpty()
+}
+
+// findGeneration finds the generation of the keyIndex that the given rev belongs to.
+// If rev falls between two generations, the key did not exist at that rev and nil is returned.
+// If the revision is the next one to be written, the current generation is returned.
+func (ki *keyIndex) findGeneration(rev int64) *generation {
+ lastg := len(ki.Generations) - 1
+ cg := lastg
+	// search backwards, from the newest generation to the oldest
+ for cg >= 0 {
+ if len(ki.Generations[cg].Revs) == 0 {
+ cg--
+ continue
+ }
+ g := ki.Generations[cg]
+ if cg != lastg {
+			// for a generation that is not the latest, the last revision is its tombstone;
+			// if even that tombstone is <= rev, the key was already deleted at rev
+			if tomb := g.Revs[len(g.Revs)-1].Main; tomb <= rev {
+				// tomb is the revision at which this generation was removed
+ return nil
+ }
+ }
+		// g.Revs[0].Main <= rev <= last revision in g
+		if g.Revs[0].Main <= rev {
+			// rev falls within this generation
+ return &ki.Generations[cg]
+ }
+ cg--
+ }
+ return nil
+}
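+
+// Example (annotation): for a keyIndex with Generations[0].Revs = [1.0, 2.0, 3.0(tombstone)] and
+// Generations[1].Revs = [4.0], findGeneration(2) returns Generations[0]; findGeneration(3) returns
+// nil, because the tombstone at 3.0 means the key no longer exists at rev 3; findGeneration(5)
+// returns Generations[1].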
+
+// put puts a revision into the keyIndex.
+func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) {
+ rev := revision{Main: main, Sub: sub}
+ if !rev.GreaterThan(ki.Modified) {
+ lg.Panic(
+ "'put'有一个意想不到的小修改",
+ zap.Int64("given-revision-Main", rev.Main),
+ zap.Int64("given-revision-Sub", rev.Sub),
+ zap.Int64("Modified-revision-Main", ki.Modified.Main),
+ zap.Int64("Modified-revision-Sub", ki.Modified.Sub),
+ )
+ }
+ if len(ki.Generations) == 0 {
+ ki.Generations = append(ki.Generations, generation{})
+ }
+ g := &ki.Generations[len(ki.Generations)-1]
+ if len(g.Revs) == 0 { // create a new Key
+ g.Created = rev
+ }
+ g.Revs = append(g.Revs, rev)
+ g.VersionCount++
+ ki.Modified = rev
+}
diff --git a/etcd/mvcc/kv2.go b/etcd/mvcc/kv2.go
new file mode 100644
index 00000000000..c61a9e90d4b
--- /dev/null
+++ b/etcd/mvcc/kv2.go
@@ -0,0 +1 @@
+package mvcc
diff --git a/etcd/mvcc/kvstore.go b/etcd/mvcc/kvstore.go
new file mode 100644
index 00000000000..e8a91b03955
--- /dev/null
+++ b/etcd/mvcc/kvstore.go
@@ -0,0 +1,537 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "math"
+ "sync"
+
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/pkg/schedule"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+
+ "go.uber.org/zap"
+)
+
+var (
+ scheduledCompactKeyName = []byte("scheduledCompactRev")
+ finishedCompactKeyName = []byte("finishedCompactRev")
+
+ ErrCompacted = errors.New("mvcc: 指定的修订版本已被压缩")
+ ErrFutureRev = errors.New("mvcc: 指定的修订版本还没有")
+)
+
+const (
+ // markedRevBytesLen is the byte length of marked revision.
+ // The first `revBytesLen` bytes represents a normal revision. The last
+ // one byte is the mark.
+ markedRevBytesLen = revBytesLen + 1
+ markBytePosition = markedRevBytesLen - 1
+ markTombstone byte = 't'
+)
+
+var (
+ restoreChunkKeys = 10000 // non-const for testing
+ defaultCompactBatchLimit = 1000
+)
+
+type StoreConfig struct {
+ CompactionBatchLimit int
+}
+
+type store struct {
+ ReadView
+ WriteView
+ cfg StoreConfig
+ // mu read locks for txns and write locks for non-txn store changes.
+ mu sync.RWMutex
+ b backend.Backend
+ kvindex index
+	le             lease.Lessor // the lessor (lease manager)
+	revMu          sync.RWMutex // protects currentRev and compactMainRev
+	currentRev     int64        // revision of the last completed transaction
+	compactMainRev int64        // main revision of the last compaction
+ fifoSched schedule.Scheduler
+ stopc chan struct{}
+ lg *zap.Logger
+}
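+
+// Note (annotation): currentRev and compactMainRev bracket the revisions a read may touch: a read at
+// rev r is valid only when compactMainRev < r <= currentRev, otherwise the store returns ErrCompacted
+// or ErrFutureRev (see HashByRev below for the same check).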
+
+// NewStore returns a new store. It is useful to create a store inside
+// mvcc pkg. It should only be used for testing externally.
+func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if cfg.CompactionBatchLimit == 0 {
+ cfg.CompactionBatchLimit = defaultCompactBatchLimit
+ }
+ s := &store{
+ cfg: cfg,
+ b: b,
+ kvindex: newTreeIndex(lg),
+
+ le: le,
+
+ currentRev: 1,
+ compactMainRev: -1,
+
+ fifoSched: schedule.NewFIFOScheduler(),
+
+ stopc: make(chan struct{}),
+
+ lg: lg,
+ }
+ s.ReadView = &readView{s}
+ s.WriteView = &writeView{s}
+ if s.le != nil {
+ s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
+ }
+
+ tx := s.b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(buckets.Key)
+ tx.UnsafeCreateBucket(buckets.Meta)
+ tx.Unlock()
+ s.b.ForceCommit()
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if err := s.restore(); err != nil {
+ // TODO: return the error instead of panic here?
+ panic("failed to recover store from backend")
+ }
+
+ return s
+}
+
+// Read returns a read transaction; the mode selects between concurrent and shared-buffer (serial) reads.
+func (s *store) Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead {
+	s.mu.RLock()
+	s.revMu.RLock()
+	// For read-only workloads, copy the transaction read buffer so reads can run
+	// concurrently with ongoing blocking writes.
+	// For write and write-read transactions, use the shared buffer instead of
+	// copying the read buffer, to avoid the extra transaction overhead.
+ var tx backend.ReadTx
+ if mode == ConcurrentReadTxMode {
+ tx = s.b.ConcurrentReadTx()
+ } else {
+ tx = s.b.ReadTx()
+ }
+
+ tx.RLock()
+ firstRev, rev := s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+ return &storeTxnRead{s, tx, firstRev, rev, trace}
+}
+
+func (s *store) Write(trace *traceutil.Trace) TxnWrite {
+ s.mu.RLock()
+ tx := s.b.BatchTx()
+ tx.Lock()
+ tw := &storeTxnWrite{
+ storeTxnRead: storeTxnRead{s, tx, 0, 0, trace},
+ tx: tx,
+ beginRev: s.currentRev,
+ changes: make([]mvccpb.KeyValue, 0, 4),
+ }
+ return tw
+}
+
+func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
+ if ctx == nil || ctx.Err() != nil {
+ select {
+ case <-s.stopc:
+ default:
+ // fix deadlock in mvcc,for more information, please refer to pr 11817.
+ // s.stopc is only updated in restore operation, which is called by apply
+ // snapshot call, compaction and apply snapshot requests are serialized by
+ // raft, and do not happen at the same time.
+ s.mu.Lock()
+ f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+ s.fifoSched.Schedule(f)
+ s.mu.Unlock()
+ }
+ return
+ }
+ close(ch)
+}
+
+// Hash computes the hash of the backend keyspace.
+func (s *store) Hash() (hash uint32, revision int64, err error) {
+	// TODO: hash and revision may not be consistent; one possible fix is taking s.revMu.RLock() at the start of this function, but that is expensive.
+ s.b.ForceCommit()
+ h, err := s.b.Hash(buckets.DefaultIgnores)
+
+ return h, s.currentRev, err
+}
+
+// HashByRev computes the hash of all MVCC revisions up to the given revision.
+func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
+ s.mu.RLock()
+ s.revMu.RLock()
+ compactRev, currentRev = s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+
+ if rev > 0 && rev <= compactRev {
+ s.mu.RUnlock()
+ return 0, 0, compactRev, ErrCompacted
+ } else if rev > 0 && rev > currentRev {
+ s.mu.RUnlock()
+ return 0, currentRev, 0, ErrFutureRev
+ }
+
+ if rev == 0 {
+ rev = currentRev
+ }
+ keep := s.kvindex.Keep(rev)
+
+ tx := s.b.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ s.mu.RUnlock()
+
+ upper := revision{Main: rev + 1}
+ lower := revision{Main: compactRev + 1}
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ h.Write(buckets.Key.Name())
+ err = tx.UnsafeForEach(buckets.Key, func(k, v []byte) error {
+ kr := bytesToRev(k)
+ if !upper.GreaterThan(kr) {
+ return nil
+ }
+ // skip revisions that are scheduled for deletion
+ // due to compacting; don't skip if there isn't one.
+ if lower.GreaterThan(kr) && len(keep) > 0 {
+ if _, ok := keep[kr]; !ok {
+ return nil
+ }
+ }
+ h.Write(k)
+ h.Write(v)
+ return nil
+ })
+ hash = h.Sum32()
+
+ return hash, currentRev, compactRev, err
+}
+
+func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) {
+ s.revMu.Lock()
+ if rev <= s.compactMainRev {
+ ch := make(chan struct{})
+ f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+ s.fifoSched.Schedule(f)
+ s.revMu.Unlock()
+ return ch, ErrCompacted
+ }
+ if rev > s.currentRev {
+ s.revMu.Unlock()
+ return nil, ErrFutureRev
+ }
+
+ s.compactMainRev = rev
+
+ rbytes := newRevBytes()
+ revToBytes(revision{Main: rev}, rbytes)
+
+ tx := s.b.BatchTx()
+ tx.Lock()
+ tx.UnsafePut(buckets.Meta, scheduledCompactKeyName, rbytes)
+ tx.Unlock()
+ // ensure that desired compaction is persisted
+ s.b.ForceCommit()
+
+ s.revMu.Unlock()
+
+ return nil, nil
+}
+
+func (s *store) compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
+ ch := make(chan struct{})
+ j := func(ctx context.Context) {
+ if ctx.Err() != nil {
+ s.compactBarrier(ctx, ch)
+ return
+ }
+ keep := s.kvindex.Compact(rev)
+		if !s.scheduleCompaction(rev, keep) { // delete superseded revisions from bolt.db
+ s.compactBarrier(context.TODO(), ch)
+ return
+ }
+ close(ch)
+ }
+
+ s.fifoSched.Schedule(j)
+ trace.Step("schedule compaction")
+ return ch, nil
+}
+
+func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) {
+ ch, err := s.updateCompactRev(rev)
+ if err != nil {
+ return ch, err
+ }
+
+ return s.compact(traceutil.TODO(), rev)
+}
+
+func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
+ s.mu.Lock()
+
+ ch, err := s.updateCompactRev(rev)
+ trace.Step("check and update compact revision")
+ if err != nil {
+ s.mu.Unlock()
+ return ch, err
+ }
+ s.mu.Unlock()
+
+ return s.compact(trace, rev) // Compact
+}
+
+func (s *store) Commit() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.b.ForceCommit()
+}
+
+func (s *store) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ close(s.stopc)
+ s.fifoSched.Stop()
+
+ s.b = b
+ s.kvindex = newTreeIndex(s.lg)
+
+ {
+ // During restore the metrics might report 'special' values
+ s.revMu.Lock()
+ s.currentRev = 1
+ s.compactMainRev = -1
+ s.revMu.Unlock()
+ }
+
+ s.fifoSched = schedule.NewFIFOScheduler()
+ s.stopc = make(chan struct{})
+
+ return s.restore()
+}
+
+func (s *store) restore() error {
+ min, max := newRevBytes(), newRevBytes()
+ revToBytes(revision{Main: 1}, min)
+ revToBytes(revision{Main: math.MaxInt64, Sub: math.MaxInt64}, max)
+
+ keyToLease := make(map[string]lease.LeaseID)
+
+ // restore index
+ tx := s.b.BatchTx()
+ tx.Lock()
+
+ _, finishedCompactBytes := tx.UnsafeRange(buckets.Meta, finishedCompactKeyName, nil, 0)
+ if len(finishedCompactBytes) != 0 {
+ s.revMu.Lock()
+ s.compactMainRev = bytesToRev(finishedCompactBytes[0]).Main
+
+ s.lg.Info(
+ "restored last compact revision",
+ zap.Stringer("meta-bucket-name", buckets.Meta),
+ zap.String("meta-bucket-name-Key", string(finishedCompactKeyName)),
+ zap.Int64("restored-compact-revision", s.compactMainRev),
+ )
+ s.revMu.Unlock()
+ }
+ _, scheduledCompactBytes := tx.UnsafeRange(buckets.Meta, scheduledCompactKeyName, nil, 0)
+ scheduledCompact := int64(0)
+ if len(scheduledCompactBytes) != 0 {
+ scheduledCompact = bytesToRev(scheduledCompactBytes[0]).Main
+ }
+
+ // index keys concurrently as they're loaded in from tx
+ rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
+ for {
+ keys, vals := tx.UnsafeRange(buckets.Key, min, max, int64(restoreChunkKeys))
+ if len(keys) == 0 {
+ break
+ }
+ // rkvc blocks if the total pending keys exceeds the restore
+ // chunk size to keep keys from consuming too much memory.
+ restoreChunk(s.lg, rkvc, keys, vals, keyToLease)
+ if len(keys) < restoreChunkKeys {
+ // partial set implies final set
+ break
+ }
+ // next set begins after where this one ended
+ newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
+ newMin.Sub++
+ revToBytes(newMin, min)
+ }
+ close(rkvc)
+
+ {
+ s.revMu.Lock()
+ s.currentRev = <-revc
+
+ // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction.
+ // the correct revision should be set to compaction revision in the case, not the largest revision
+ // we have seen.
+ if s.currentRev < s.compactMainRev {
+ s.currentRev = s.compactMainRev
+ }
+ s.revMu.Unlock()
+ }
+
+ if scheduledCompact <= s.compactMainRev {
+ scheduledCompact = 0
+ }
+
+ for key, lid := range keyToLease {
+ if s.le == nil {
+ tx.Unlock()
+ panic("no lessor to attach lease")
+ }
+ err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
+ if err != nil {
+ s.lg.Error(
+ "failed to attach a lease",
+ zap.String("lease-id", fmt.Sprintf("%016x", lid)),
+ zap.Error(err),
+ )
+ }
+ }
+
+ tx.Unlock()
+
+ s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev))
+
+ if scheduledCompact != 0 {
+ if _, err := s.compactLockfree(scheduledCompact); err != nil {
+ s.lg.Warn("compaction encountered error", zap.Error(err))
+ }
+
+ s.lg.Info(
+ "resume scheduled compaction",
+ zap.Stringer("meta-bucket-name", buckets.Meta),
+ zap.String("meta-bucket-name-Key", string(scheduledCompactKeyName)),
+ zap.Int64("scheduled-compact-revision", scheduledCompact),
+ )
+ }
+
+ return nil
+}
+
+type revKeyValue struct {
+ key []byte
+ kv mvccpb.KeyValue
+ kstr string
+}
+
+func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) {
+ rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
+ go func() {
+ currentRev := int64(1)
+ defer func() { revc <- currentRev }()
+ // restore the tree index from streaming the unordered index.
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys)
+ for rkv := range rkvc {
+ ki, ok := kiCache[rkv.kstr]
+ // purge kiCache if many keys but still missing in the cache
+ if !ok && len(kiCache) >= restoreChunkKeys {
+ i := 10
+ for k := range kiCache {
+ delete(kiCache, k)
+ if i--; i == 0 {
+ break
+ }
+ }
+ }
+ // cache miss, fetch from tree index if there
+ if !ok {
+ ki = &keyIndex{Key: rkv.kv.Key}
+ if idxKey := idx.KeyIndex(ki); idxKey != nil {
+ kiCache[rkv.kstr], ki = idxKey, idxKey
+ ok = true
+ }
+ }
+ rev := bytesToRev(rkv.key)
+ currentRev = rev.Main
+ if ok {
+ if isTombstone(rkv.key) {
+ if err := ki.tombstone(lg, rev.Main, rev.Sub); err != nil {
+ lg.Warn("tombstone encountered error", zap.Error(err))
+ }
+ continue
+ }
+ ki.put(lg, rev.Main, rev.Sub)
+ } else if !isTombstone(rkv.key) {
+ ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
+ idx.Insert(ki)
+ kiCache[rkv.kstr] = ki
+ }
+ }
+ }()
+ return rkvc, revc
+}
+
+func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
+ for i, key := range keys {
+ rkv := revKeyValue{key: key}
+ if err := rkv.kv.Unmarshal(vals[i]); err != nil {
+ lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ }
+ rkv.kstr = string(rkv.kv.Key)
+ if isTombstone(key) {
+ delete(keyToLease, rkv.kstr)
+ } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
+ keyToLease[rkv.kstr] = lid
+ } else {
+ delete(keyToLease, rkv.kstr)
+ }
+ kvc <- rkv
+ }
+}
+
+func (s *store) Close() error {
+ close(s.stopc)
+ s.fifoSched.Stop()
+ return nil
+}
+
+// appendMarkTombstone appends tombstone mark to normal revision bytes.
+func appendMarkTombstone(lg *zap.Logger, b []byte) []byte {
+ if len(b) != revBytesLen {
+ lg.Panic(
+ "cannot append tombstone mark to non-normal revision bytes",
+ zap.Int("expected-revision-bytes-size", revBytesLen),
+ zap.Int("given-revision-bytes-size", len(b)),
+ )
+ }
+ return append(b, markTombstone)
+}
+
+// isTombstone checks whether the revision bytes is a tombstone.
+func isTombstone(b []byte) bool {
+ return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
+}
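+
+// Illustrative sketch (an assumption for exposition, not part of upstream etcd): how a tombstone revision is
+// produced and detected with the helpers above. newRevBytes and revToBytes live elsewhere in this package.
+//
+//	b := newRevBytes()                       // 17-byte normal revision buffer
+//	revToBytes(revision{Main: 5, Sub: 0}, b) // fill in the revision
+//	b = appendMarkTombstone(lg, b)           // now 18 bytes; the last byte is 't'
+//	ok := isTombstone(b)                     // ok == true; tombstones mark deletions in the key bucket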
diff --git a/etcd/mvcc/kvstore_compaction.go b/etcd/mvcc/kvstore_compaction.go
new file mode 100644
index 00000000000..0a655ca0ec0
--- /dev/null
+++ b/etcd/mvcc/kvstore_compaction.go
@@ -0,0 +1,77 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/binary"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "go.uber.org/zap"
+)
+
+// scheduleCompaction walks the key bucket and deletes compacted revisions. Because this can put pressure on
+// boltdb, it deletes keys in batches (bounded by CompactionBatchLimit, default 1000) and pauses 10ms between
+// batches so that normal read and write requests are not starved.
+func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
+ totalStart := time.Now()
+ keyCompactions := 0
+
+ end := make([]byte, 8)
+ binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
+
+ last := make([]byte, 8+1+8)
+ for {
+ var rev revision
+
+ tx := s.b.BatchTx()
+ tx.Lock()
+ keys, _ := tx.UnsafeRange(buckets.Key, last, end, int64(s.cfg.CompactionBatchLimit))
+ for _, key := range keys {
+ rev = bytesToRev(key)
+ if _, ok := keep[rev]; !ok {
+ tx.UnsafeDelete(buckets.Key, key)
+ keyCompactions++
+ }
+ }
+
+ if len(keys) < s.cfg.CompactionBatchLimit {
+ rbytes := make([]byte, 8+1+8)
+ revToBytes(revision{Main: compactMainRev}, rbytes)
+ tx.UnsafePut(buckets.Meta, finishedCompactKeyName, rbytes)
+ tx.Unlock()
+ s.lg.Info(
+ "finished scheduled compaction",
+ zap.Int64("compact-revision", compactMainRev),
+ zap.Duration("took", time.Since(totalStart)),
+ )
+ return true
+ }
+
+ // update last
+ revToBytes(revision{Main: rev.Main, Sub: rev.Sub + 1}, last)
+ tx.Unlock()
+ // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer
+ s.b.ForceCommit()
+
+ select {
+ case <-time.After(10 * time.Millisecond):
+ case <-s.stopc:
+ return false
+ }
+ }
+}
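+
+// Rough cost illustration (an assumption, not taken from the original sources): with the default
+// CompactionBatchLimit of 1000 and the fixed 10ms pause above, compacting 100,000 obsolete revisions runs
+// about 100 batches, i.e. roughly one second of pause time plus the per-batch delete and ForceCommit cost,
+// which spreads the boltdb load out instead of issuing one huge delete transaction.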
+
+// When a large number of keys are deleted through boltdb, the B+ tree splits and rebalances after the transaction
+// commits, freeing branch/leaf pages. boltdb does not return those pages to the disk, however, because resizing
+// the db file is expensive and would hurt performance.
diff --git a/etcd/mvcc/kvstore_txn.go b/etcd/mvcc/kvstore_txn.go
new file mode 100644
index 00000000000..c9ec85b1c90
--- /dev/null
+++ b/etcd/mvcc/kvstore_txn.go
@@ -0,0 +1,68 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+)
+
+type storeTxnRead struct {
+ s *store
+ tx backend.ReadTx
+ firstRev int64
+	rev      int64 // revision of the KV at the time the read txn was opened
+ trace *traceutil.Trace
+}
+
+func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev }
+
+func (tr *storeTxnRead) Rev() int64 {
+ return tr.rev
+}
+
+func (tr *storeTxnRead) End() {
+ tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx.
+ tr.s.mu.RUnlock()
+}
+
+type storeTxnWrite struct {
+ storeTxnRead
+ tx backend.BatchTx
+	beginRev int64             // revision at the time the txn began; writes land on the next revision
+	changes  []mvccpb.KeyValue // key-value pairs written by this txn, including their revision data
+}
+
+func (tw *storeTxnWrite) Rev() int64 {
+ return tw.beginRev
+}
+
+func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
+
+// End releases the locks held by the write txn and bumps the revision if anything changed.
+func (tw *storeTxnWrite) End() {
+	// only update the revision if the txn modified the mvcc state.
+	if len(tw.changes) != 0 {
+		// hold revMu to keep new read txns from opening until the writeback finishes.
+ tw.s.revMu.Lock()
+ tw.s.currentRev++
+ }
+ tw.tx.Unlock()
+ if len(tw.changes) != 0 {
+ tw.s.revMu.Unlock()
+ }
+ tw.s.mu.RUnlock()
+}
diff --git a/etcd/mvcc/over_kv.go b/etcd/mvcc/over_kv.go
new file mode 100644
index 00000000000..7620d2a9d4c
--- /dev/null
+++ b/etcd/mvcc/over_kv.go
@@ -0,0 +1,86 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+)
+
+// RangeOptions carries the parameters of a range request.
+type RangeOptions struct {
+	Limit int64 // maximum number of keys the user asked for
+	Rev   int64 // specific revision to read at
+	Count bool  // only count the matching revisions
+}
+
+// RangeResult is the response of a range request.
+type RangeResult struct {
+	KVs   []mvccpb.KeyValue
+	Rev   int64 // latest revision of the store
+	Count int   // number of matching revisions
+}
+
+type ReadView interface {
+	// timeline of revisions relative to a read view:
+	//
+	//	compact    before    cur
+	//	  rev       rev      rev
+	FirstRev() int64 // first KV revision at the time of opening the txn; after a compaction it advances to the compaction revision.
+	Rev() int64      // revision of the KV at the time of opening the txn.
+	Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) // reads keys in the given range
+}
+
+// TxnRead is a read-only transaction; it does not block other read-only transactions.
+type TxnRead interface {
+	ReadView
+	End() // marks the transaction as complete and ready to commit
+}
+
+type WriteView interface {
+	DeleteRange(key, end []byte) (n, rev int64) // deletes the keys in the given range
+	// Put puts the given key-value pair into the store. Put also takes an additional lease argument that
+	// attaches the lease as metadata to the key-value pair; the KV implementation does not validate the lease id.
+	// Put also increments the store revision and generates an event in the event history. The returned revision
+	// is the current revision of the KV when the operation is applied.
+	Put(key, value []byte, lease lease.LeaseID) (rev int64)
+}
+
+type TxnWrite interface {
+ TxnRead
+ WriteView
+	// Changes returns the changes made since the write txn was opened.
+ Changes() []mvccpb.KeyValue
+}
+
+// txnReadWrite wraps a read txn as a write txn; it panics on any write operation.
+type txnReadWrite struct {
+ TxnRead
+}
+
+func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
+func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ panic("unexpected Put")
+}
+func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
+
+func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
+
+type ReadTxMode uint32
+
+const (
+	ConcurrentReadTxMode = ReadTxMode(1) // concurrent-read mode: copies the read buffer for better concurrency
+ SharedBufReadTxMode = ReadTxMode(2)
+)
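+
+// Illustrative usage sketch (an assumption for exposition, not part of upstream etcd): how a caller typically
+// drives the read path defined above. `kv` stands for any value implementing the KV interface.
+//
+//	txn := kv.Read(ConcurrentReadTxMode, traceutil.TODO()) // copies the read buffer; good for read-only work
+//	defer txn.End()
+//	res, err := txn.Range(context.TODO(), []byte("foo"), []byte("foo0"),
+//		RangeOptions{Limit: 10, Rev: 0, Count: false}) // Rev 0 means "read at the current revision"
+//	if err == nil {
+//		_ = res.KVs // at most 10 key-value pairs; res.Rev is the store's current revision
+//	}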
diff --git a/etcd/mvcc/over_kv_del.go b/etcd/mvcc/over_kv_del.go
new file mode 100644
index 00000000000..ce4b8c75d1b
--- /dev/null
+++ b/etcd/mvcc/over_kv_del.go
@@ -0,0 +1,78 @@
+package mvcc
+
+import (
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+ "go.uber.org/zap"
+)
+
+// DeleteRange opens a write txn and deletes the given range within it.
+func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
+ tw := wv.kv.Write(traceutil.TODO())
+ defer tw.End()
+
+ return tw.(*storeTxnWrite).DeleteRange(key, end)
+}
+
+// DeleteRange deletes the given range inside this write txn.
+func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
+ if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
+ return n, tw.beginRev + 1
+ }
+ return 0, tw.beginRev
+}
+
+// deleteRange removes every key in the range from the kv index and writes tombstones for them.
+func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
+ rrev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rrev++
+ }
+ keys, _ := tw.s.kvindex.Range(key, end, rrev)
+ if len(keys) == 0 {
+ return 0
+ }
+ for _, key := range keys {
+ tw.delete(key)
+	}
+ return int64(len(keys))
+}
+
+// delete writes a tombstone for the key into bolt.db and the index.
+func (tw *storeTxnWrite) delete(key []byte) {
+ indexBytes := newRevBytes()
+ idxRev := revision{Main: tw.beginRev + 1, Sub: int64(len(tw.changes))}
+ revToBytes(idxRev, indexBytes)
+
+ indexBytes = appendMarkTombstone(tw.storeTxnRead.s.lg, indexBytes)
+
+ kv := mvccpb.KeyValue{Key: string(key)}
+
+ d, err := kv.Marshal()
+ if err != nil {
+		tw.storeTxnRead.s.lg.Fatal("failed to marshal mvccpb.KeyValue", zap.Error(err))
+ }
+
+ tw.tx.UnsafeSeqPut(buckets.Key, indexBytes, d)
+ err = tw.s.kvindex.Tombstone(key, idxRev)
+ if err != nil {
+ tw.storeTxnRead.s.lg.Fatal(
+ "failed to tombstone an existing Key",
+ zap.String("Key", string(key)),
+ zap.Error(err),
+ )
+ }
+ tw.changes = append(tw.changes, kv)
+
+ item := lease.LeaseItem{Key: string(key)}
+ leaseID := tw.s.le.GetLease(item)
+
+ if leaseID != lease.NoLease {
+ err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
+ if err != nil {
+			tw.storeTxnRead.s.lg.Error("failed to detach old lease from the key", zap.Error(err))
+ }
+ }
+}
diff --git a/etcd/mvcc/over_kv_get.go b/etcd/mvcc/over_kv_get.go
new file mode 100644
index 00000000000..e507bf46865
--- /dev/null
+++ b/etcd/mvcc/over_kv_get.go
@@ -0,0 +1,77 @@
+package mvcc
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "go.uber.org/zap"
+)
+
+// Range reads keys at the read txn's revision.
+func (tr *storeTxnRead) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ return tr.rangeKeys(ctx, key, end, tr.Rev(), ro)
+}
+
+func (tw *storeTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ rev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rev++
+ }
+ return tw.rangeKeys(ctx, key, end, rev, ro)
+}
+
+// rangeKeys reads the key-value pairs in [key, end) at the requested revision.
+func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
+	rev := ro.Rev // requested revision
+ if rev > curRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
+ }
+ if rev <= 0 {
+ rev = curRev
+ }
+ if rev < tr.s.compactMainRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
+ }
+	if ro.Count { // only count the matching revisions
+		total := tr.s.kvindex.CountRevisions(key, end, rev)
+		tr.trace.Step("count revisions from in-memory index tree")
+		return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil
+	}
+	// fetch the matching revision pairs
+	revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit))
+	tr.trace.Step("range keys from in-memory index tree")
+ if len(revpairs) == 0 {
+ return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil
+ }
+
+ limit := int(ro.Limit)
+ if limit <= 0 || limit > len(revpairs) {
+		limit = len(revpairs) // actual number of results to return
+ }
+
+ kvs := make([]mvccpb.KeyValue, limit)
+	revBytes := newRevBytes() // 17-byte buffer
+	// take the index revisions and look up the actual values in bolt.db
+ for i, revpair := range revpairs[:len(kvs)] {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ revToBytes(revpair, revBytes)
+		// fetch the value stored at this revision
+ _, vs := tr.tx.UnsafeRange(buckets.Key, revBytes, nil, 0)
+ if len(vs) != 1 {
+			tr.s.lg.Fatal("range failed to find revision pair", zap.Int64("revision-Main", revpair.Main), zap.Int64("revision-Sub", revpair.Sub))
+ }
+ if err := kvs[i].Unmarshal([]byte(vs[0])); err != nil {
+ tr.s.lg.Fatal(
+				"failed to unmarshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ }
+ }
+	tr.trace.Step("range keys from bolt.db")
+ return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil
+}
diff --git a/etcd/mvcc/over_kv_interface.go b/etcd/mvcc/over_kv_interface.go
new file mode 100644
index 00000000000..87be597ad80
--- /dev/null
+++ b/etcd/mvcc/over_kv_interface.go
@@ -0,0 +1,28 @@
+package mvcc
+
+import (
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+)
+
+type KV interface {
+ ReadView
+ WriteView
+	Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead                           // opens a read transaction
+	Write(trace *traceutil.Trace) TxnWrite                                          // opens a write transaction
+	Hash() (hash uint32, revision int64, err error)                                 // computes the hash of the kv store
+	HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error) // computes the hash of all MVCC revisions up to the given revision
+	Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error)             // frees all superseded keys with revisions less than rev
+	Commit()                                                                        // commits outstanding txns into the underlying backend
+ Restore(b backend.Backend) error
+ Close() error
+}
+
+type WatchableKV interface {
+ KV
+ Watchable
+}
+
+type Watchable interface {
+ NewWatchStream() WatchStream
+}
diff --git a/etcd/mvcc/over_kv_put.go b/etcd/mvcc/over_kv_put.go
new file mode 100644
index 00000000000..c0899ba4b0c
--- /dev/null
+++ b/etcd/mvcc/over_kv_put.go
@@ -0,0 +1,74 @@
+package mvcc
+
+import (
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "go.uber.org/zap"
+)
+
+// put stores the key-value pair at the txn's next revision and attaches the lease if one is given.
+func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
+	rev := tw.beginRev + 1 // the txn began at beginRev; this write lands on the next revision
+ c := rev
+ oldLease := lease.NoLease
+
+	// if the key existed before, keep its original create revision and look up its previous leaseID
+	_, created, beforeVersion, err := tw.s.kvindex.Get(key, rev) // latest modification with revision <= rev (or 0, 0, nil if absent)
+ if err == nil {
+ c = created.Main
+ oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
+		tw.trace.Step("get key's previous created_revision and leaseID")
+ }
+ indexBytes := newRevBytes()
+	idxRev := revision{Main: rev, Sub: int64(len(tw.changes))} // revision assigned to this write
+ revToBytes(idxRev, indexBytes)
+
+ kv := mvccpb.KeyValue{
+ Key: string(key),
+ Value: string(value),
+		CreateRevision: c,                 // revision at which the key was created in its current generation
+		ModRevision:    rev,               // revision of this modification
+		Version:        beforeVersion + 1, // Version is the key's version; deleting the key resets it to 0 and every modification increments it.
+		Lease:          int64(leaseID),    // lease ID
+ }
+
+ d, err := kv.Marshal()
+ if err != nil {
+		tw.storeTxnRead.s.lg.Fatal("failed to marshal mvccpb.KeyValue", zap.Error(err))
+ }
+
+	tw.trace.Step("marshal mvccpb.KeyValue")
+	tw.tx.UnsafeSeqPut(buckets.Key, indexBytes, d) // write to the backend buffer / db
+	tw.s.kvindex.Put(key, idxRev)                  // record the new revision in the in-memory index
+	tw.changes = append(tw.changes, kv)
+	tw.trace.Step("store key-value pair into bolt.db")
+
+	// oldLease is NoLease if the key had no lease attached before
+	if oldLease != lease.NoLease {
+		if tw.s.le == nil {
+			panic("lessor not found")
+		}
+		// detach the old lease from the key
+		err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
+		if err != nil {
+			tw.storeTxnRead.s.lg.Error("failed to detach old lease from the key", zap.Error(err))
+		}
+	}
+	if leaseID != lease.NoLease {
+		if tw.s.le == nil {
+			panic("lessor not found")
+		}
+		err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
+		if err != nil {
+			panic("failed to attach lease")
+		}
+	}
+	tw.trace.Step("attach lease to key")
+}
+
+func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
+ tw.put(key, value, lease)
+ return tw.beginRev + 1
+}
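+
+// Illustrative sketch (an assumption, not from the original sources): how revisions are assigned when several
+// writes happen inside one write txn. If the txn began at beginRev = 7, then:
+//
+//	tw.Put([]byte("a"), []byte("1"), lease.NoLease) // stored at revision{Main: 8, Sub: 0}, returns 8
+//	tw.Put([]byte("b"), []byte("2"), lease.NoLease) // stored at revision{Main: 8, Sub: 1}, returns 8
+//	tw.End()                                        // currentRev becomes 8; both writes share main revision 8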
diff --git a/etcd/mvcc/over_kv_view.go b/etcd/mvcc/over_kv_view.go
new file mode 100644
index 00000000000..250e472bcf1
--- /dev/null
+++ b/etcd/mvcc/over_kv_view.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+)
+
+type readView struct{ kv KV }
+
+func (rv *readView) FirstRev() int64 {
+ tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO())
+ defer tr.End()
+ return tr.FirstRev()
+}
+
+func (rv *readView) Rev() int64 {
+	tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO()) // concurrent-read mode
+ defer tr.End()
+ return tr.Rev()
+}
+
+func (rv *readView) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO())
+ defer tr.End()
+ return tr.Range(ctx, key, end, ro)
+}
+
+type writeView struct{ kv KV }
+
+func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ tw := wv.kv.Write(traceutil.TODO())
+ defer tw.End()
+ return tw.Put(key, value, lease)
+}
diff --git a/etcd/mvcc/over_revision.go b/etcd/mvcc/over_revision.go
new file mode 100644
index 00000000000..f3618377670
--- /dev/null
+++ b/etcd/mvcc/over_revision.go
@@ -0,0 +1,53 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import "encoding/binary"
+
+// revBytesLen is the byte length of a normal revision.
+const revBytesLen = 8 + 1 + 8
+
+func (a revision) GreaterThan(b revision) bool {
+ if a.Main > b.Main {
+ return true
+ }
+ if a.Main < b.Main {
+ return false
+ }
+ return a.Sub > b.Sub
+}
+
+func newRevBytes() []byte {
+ return make([]byte, revBytesLen, markedRevBytesLen)
+}
+
+func revToBytes(rev revision, bytes []byte) {
+ binary.BigEndian.PutUint64(bytes, uint64(rev.Main))
+ bytes[8] = '_'
+ binary.BigEndian.PutUint64(bytes[9:], uint64(rev.Sub))
+}
+
+func bytesToRev(bytes []byte) revision {
+ return revision{
+ Main: int64(binary.BigEndian.Uint64(bytes[0:8])),
+ Sub: int64(binary.BigEndian.Uint64(bytes[9:])),
+ }
+}
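+
+// Illustrative round trip (an assumption for exposition, not part of upstream etcd) of the 17-byte encoding
+// used above: bytes 0-7 hold the big-endian Main revision, byte 8 is the '_' separator, bytes 9-16 hold the
+// big-endian Sub revision.
+//
+//	b := newRevBytes()                       // len 17, cap 18 (room for a tombstone mark)
+//	revToBytes(revision{Main: 5, Sub: 2}, b)
+//	r := bytesToRev(b)                       // r == revision{Main: 5, Sub: 2}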
+
+type revisions []revision
+
+func (a revisions) Len() int { return len(a) }
+func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) }
+func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/etcd/mvcc/over_watchable_store.go b/etcd/mvcc/over_watchable_store.go
new file mode 100644
index 00000000000..8e38bffc4a1
--- /dev/null
+++ b/etcd/mvcc/over_watchable_store.go
@@ -0,0 +1,503 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/lease"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/buckets"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+
+ "go.uber.org/zap"
+)
+
+var (
+ // chanBufLen is the length of the buffered chan
+ // for sending out watched events.
+ // See https://github.com/etcd-io/etcd/issues/11906 for more detail.
+ chanBufLen = 128
+
+ // maxWatchersPerSync is the number of watchers to sync in a single batch
+ maxWatchersPerSync = 512
+)
+
+type watchable interface {
+ watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
+ progress(w *watcher)
+ rev() int64
+}
+
+type watchableStore struct {
+ *store
+ mu sync.RWMutex
+	victims  []watcherBatch // event batches parked here because the watcher channels were blocked
+	victimc  chan struct{}  // signalled when a watcher's channel blocks and its watcherBatch is parked in victims
+	unsynced watcherGroup   // watchers that have not caught up yet
+	synced   watcherGroup   // watchers that are fully caught up
+ stopc chan struct{}
+ wg sync.WaitGroup
+}
+
+// cancelFunc updates unsynced and synced maps when running
+// cancel operations.
+type cancelFunc func()
+
+func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) WatchableKV {
+ return newWatchableStore(lg, b, le, cfg)
+}
+
+func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ s := &watchableStore{
+ store: NewStore(lg, b, le, cfg),
+		victimc:  make(chan struct{}, 1), // signalled when a blocked watcher's batch is parked in victims
+		unsynced: newWatcherGroup(),      // watchers that have not caught up yet
+		synced:   newWatcherGroup(),      // watchers that are fully caught up
+ stopc: make(chan struct{}),
+ }
+	s.store.ReadView = &readView{s}   // route reads through the watchable store's view
+	s.store.WriteView = &writeView{s} // route writes through the watchable store's view
+	if s.le != nil {
+		// use this store as the deleter so that revoking a lease triggers watch events
+ s.le.SetRangeDeleter(func() lease.TxnDelete {
+ return s.Write(traceutil.TODO())
+ })
+ }
+ s.wg.Add(2)
+ go s.syncWatchersLoop()
+	go s.syncVictimsLoop() // loops to retry delivering the victims parked in the watchableStore
+ return s
+}
+
+func (s *watchableStore) Close() error {
+ close(s.stopc)
+ s.wg.Wait()
+ return s.store.Close()
+}
+
+func (s *watchableStore) NewWatchStream() WatchStream {
+ return &watchStream{
+ watchable: s,
+ ch: make(chan WatchResponse, chanBufLen),
+ cancels: make(map[WatchID]cancelFunc),
+ watchers: make(map[WatchID]*watcher),
+ }
+}
+
+// watch creates and registers a watcher.
+func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
+ wa := &watcher{
+ key: string(key),
+ end: string(end),
+ minRev: startRev,
+ id: id,
+		ch:          ch, // channel the change events are sent to; it may be shared with other watchers
+ filterFuncs: fcs,
+ }
+
+ s.mu.Lock()
+ s.revMu.RLock()
+	// a startRev of 0, or one beyond the current revision, means the watcher starts at the next revision
+ synced := startRev > s.store.currentRev || startRev == 0
+ if synced {
+ wa.minRev = s.store.currentRev + 1
+ if startRev > wa.minRev {
+ wa.minRev = startRev
+ }
+		s.synced.add(wa) // the watcher is already caught up
+	} else {
+		s.unsynced.add(wa) // the watcher still has history to catch up on
+ }
+ s.revMu.RUnlock()
+ s.mu.Unlock()
+
+ return wa, func() { s.cancelWatcher(wa) }
+}
+
+// cancelWatcher removes the watcher from the store.
+func (s *watchableStore) cancelWatcher(wa *watcher) {
+ for {
+ s.mu.Lock()
+ if s.unsynced.delete(wa) {
+ break
+ } else if s.synced.delete(wa) {
+ break
+		} else if wa.compacted { // the watcher was already removed because of compaction
+			break
+		} else if wa.ch == nil { // nothing more can be sent on a nil channel
+ // already canceled (e.g., cancel/close race)
+ break
+ }
+
+ if !wa.victim {
+ s.mu.Unlock()
+			panic("watcher is not a victim but not in a watch group")
+ }
+
+ var victimBatch watcherBatch
+ for _, wb := range s.victims {
+ if wb[wa] != nil {
+ victimBatch = wb
+ break
+ }
+ }
+ if victimBatch != nil {
+ delete(victimBatch, wa)
+ break
+ }
+
+ // victim being processed so not accessible; retry
+ s.mu.Unlock()
+ time.Sleep(time.Millisecond)
+ }
+
+ wa.ch = nil
+ s.mu.Unlock()
+}
+
+func (s *watchableStore) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ err := s.store.Restore(b)
+ if err != nil {
+ return err
+ }
+
+ for wa := range s.synced.watchers {
+ wa.restore = true
+ s.unsynced.add(wa)
+ }
+ s.synced = newWatcherGroup()
+ return nil
+}
+
+// syncWatchersLoop delivers, roughly every 100ms, all pending events to the unsynced watchers.
+func (s *watchableStore) syncWatchersLoop() {
+ defer s.wg.Done()
+
+ for {
+ s.mu.RLock()
+ st := time.Now()
+		lastUnsyncedWatchers := s.unsynced.size() // current size of the unsynced watcherGroup
+ s.mu.RUnlock()
+
+ unsyncedWatchers := 0
+ if lastUnsyncedWatchers > 0 {
+			// some watchers still need syncing; syncWatchers() takes a batch from the unsynced
+			// watcherGroup and tries to send their pending events
+ unsyncedWatchers = s.syncWatchers()
+ }
+ syncDuration := time.Since(st)
+
+ waitDuration := 100 * time.Millisecond
+		// some watchers are still lagging behind, so sync again without the full wait
+ if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
+ waitDuration = syncDuration
+ }
+
+ select {
+ case <-time.After(waitDuration):
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// syncVictimsLoop retries sending the event batches that were parked because a channel was blocked. If a send
+// still fails the watcher stays a victim; if it succeeds, the watcher is moved back into the synced or unsynced
+// watcherGroup depending on how far it has caught up.
+func (s *watchableStore) syncVictimsLoop() {
+ defer s.wg.Done()
+
+ for {
+ for s.moveVictims() != 0 {
+			// keep retrying the damaged (victim) watchers
+ }
+ s.mu.RLock()
+ isEmpty := len(s.victims) == 0
+ s.mu.RUnlock()
+
+ var tickc <-chan time.Time
+ if !isEmpty {
+ tickc = time.After(10 * time.Millisecond)
+ }
+
+ select {
+ case <-tickc:
+		case <-s.victimc: // new victims were queued; retry right away
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// moveVictims retries delivering the event batches that were blocked.
+func (s *watchableStore) moveVictims() (moved int) {
+ s.mu.Lock()
+ victims := s.victims
+ s.victims = nil
+ s.mu.Unlock()
+
+ var newVictim watcherBatch
+ for _, wb := range victims {
+ // try to send responses again
+		// (these were parked because the watcher channels were blocked)
+ for w, eb := range wb {
+ rev := w.minRev - 1
+			if !w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+				if newVictim == nil {
+					newVictim = make(watcherBatch)
+				}
+				newVictim[w] = eb
+				continue
+			}
+ moved++
+ }
+
+ s.mu.Lock()
+ s.store.revMu.RLock()
+ curRev := s.store.currentRev
+ for w, eb := range wb {
+ if newVictim != nil && newVictim[w] != nil {
+				// the response still could not be sent, so the watcher stays a victim
+ continue
+ }
+ w.victim = false
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+			// the watcher is no longer a victim; compare its revision with the current revision to decide whether it goes into unsynced or synced
+ if w.minRev <= curRev {
+ s.unsynced.add(w)
+ } else {
+ s.synced.add(w)
+ }
+ }
+ s.store.revMu.RUnlock()
+ s.mu.Unlock()
+ }
+
+ if len(newVictim) > 0 {
+ s.mu.Lock()
+ s.victims = append(s.victims, newVictim)
+ s.mu.Unlock()
+ }
+
+ return moved
+}
+
+// syncWatchers sends pending events to all unsynced watchers:
+// 1. choose a set of watchers from the unsynced watcher group
+// 2. iterate over the set to get the minimum revision and remove compacted watchers
+// 3. use minimum revision to get all Key-value pairs and send those events to watchers
+// 4. remove synced watchers in set from unsynced group and move to synced group
+func (s *watchableStore) syncWatchers() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.unsynced.size() == 0 {
+ return 0
+ }
+
+ s.store.revMu.RLock()
+ defer s.store.revMu.RUnlock()
+
+ curRev := s.store.currentRev
+ compactionRev := s.store.compactMainRev
+	// take a batch of watchers from the unsynced watcherGroup (at most maxWatchersPerSync, default 512) and find the smallest minRev among them
+ wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
+ minBytes, maxBytes := newRevBytes(), newRevBytes()
+ revToBytes(revision{Main: minRev}, minBytes)
+ revToBytes(revision{Main: curRev + 1}, maxBytes)
+
+ tx := s.store.b.ReadTx()
+ tx.RLock()
+	revs, vs := tx.UnsafeRange(buckets.Key, minBytes, maxBytes, 0) // range over the key bucket
+	evs := kvsToEvents(s.store.lg, wg, revs, vs)                   // convert the key-value pairs read from BoltDB into events
+ tx.RUnlock()
+
+ var victims watcherBatch
+ wb := newWatcherBatch(wg, evs)
+	for w := range wg.watchers { // send the events to each watcher's channel
+ w.minRev = curRev + 1
+
+ eb, ok := wb[w]
+ if !ok {
+ // bring un-notified watcher to synced
+ s.synced.add(w)
+ s.unsynced.delete(w)
+ continue
+ }
+
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+
+		if !w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
+			// either the send genuinely failed or the channel is blocked; mark the watcher as a victim for now
+			if victims == nil {
+				victims = make(watcherBatch)
+			}
+			w.victim = true
+		}
+
+ if w.victim {
+ victims[w] = eb
+ } else {
+ if eb.moreRev != 0 {
+ continue
+ }
+ s.synced.add(w)
+ }
+ s.unsynced.delete(w)
+ }
+ s.addVictim(victims)
+
+ return s.unsynced.size()
+}
+
+// kvsToEvents converts the key-value pairs read from BoltDB into Event instances, skipping keys that no watcher in the watcherGroup watches and tagging each event mvccpb.PUT or mvccpb.DELETE.
+func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
+ for i, v := range vals {
+ var kv mvccpb.KeyValue
+ if err := kv.Unmarshal(v); err != nil {
+ lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ }
+
+ if !wg.contains(string(kv.Key)) {
+ continue
+ }
+
+ ty := mvccpb.PUT
+ if isTombstone(revs[i]) {
+ ty = mvccpb.DELETE
+ // patch in mod revision so watchers won't skip
+ kv.ModRevision = bytesToRev(revs[i]).Main
+ }
+ evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
+ }
+ return evs
+}
+
+// notify delivers the given change events at the given revision to the matching watchers.
+func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
+ var victim watcherBatch
+	// type watcherBatch map[*watcher]*eventBatch
+	// find every matching watcher; synced uses a map plus an interval tree to locate watched keys quickly
+ for watcher, eb := range newWatcherBatch(&s.synced, evs) {
+ if eb.revs != 1 {
+			s.store.lg.Panic("unexpected multiple revisions in a single watch notification", zap.Int("number-of-revisions", eb.revs))
+ }
+		if !watcher.send(WatchResponse{WatchID: watcher.id, Events: eb.evs, Revision: rev}) {
+			// move the slow watcher to the victim list
+			watcher.minRev = rev + 1
+			if victim == nil {
+				victim = make(watcherBatch)
+			}
+			watcher.victim = true
+			victim[watcher] = eb
+			s.synced.delete(watcher)
+		}
+ }
+	s.addVictim(victim) // park the batches that could not be sent because the chan was full; they are retried later
+}
+
+// addVictim parks the pending event batches of blocked watchers.
+func (s *watchableStore) addVictim(victim watcherBatch) {
+ if victim == nil {
+ return
+ }
+ s.victims = append(s.victims, victim)
+ select {
+ case s.victimc <- struct{}{}:
+ default:
+ }
+}
+
+func (s *watchableStore) rev() int64 {
+ return s.store.Rev()
+}
+
+func (s *watchableStore) progress(w *watcher) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ if _, ok := s.synced.watchers[w]; ok {
+ w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
+ // If the ch is full, this watcher is receiving events.
+ // We do not need to send progress at all.
+ }
+}
+
+type watcher struct {
+ key string
+ end string
+	victim    bool // set while ch is blocked and victim processing is in progress
+	compacted bool // set when the watcher is removed because of compaction
+ // restore is true when the watcher is being restored from leader snapshot
+ // which means that this watcher has just been moved from "synced" to "unsynced"
+ // watcher group, possibly with a future revision when it was first added
+ // to the synced watcher
+ // "unsynced" watcher revision must always be <= current revision,
+ // except when the watcher were to be moved from "synced" watcher group
+ restore bool
+	minRev      int64                // revision the watcher starts (or resumes) watching from
+	id          WatchID              // watcher id
+	filterFuncs []FilterFunc         // event filters
+	ch          chan<- WatchResponse // channel the change events are sent to; it may be shared with other watchers
+}
+
+// send delivers a watch response to the client; it returns false if the channel would block.
+func (w *watcher) send(wr WatchResponse) bool {
+ progressEvent := len(wr.Events) == 0
+
+ if len(w.filterFuncs) != 0 {
+ ne := make([]mvccpb.Event, 0, len(wr.Events))
+ for i := range wr.Events {
+ filtered := false
+ for _, filter := range w.filterFuncs {
+ if filter(wr.Events[i]) {
+ filtered = true
+ break
+ }
+ }
+ if !filtered {
+ ne = append(ne, wr.Events[i])
+ }
+ }
+ wr.Events = ne
+ }
+
+	// all events were filtered out
+ if !progressEvent && len(wr.Events) == 0 {
+ return true
+ }
+ select {
+	case w.ch <- wr: // event delivered
+ return true
+ default:
+ return false
+ }
+}
diff --git a/etcd/mvcc/over_watchable_store_txn.go b/etcd/mvcc/over_watchable_store_txn.go
new file mode 100644
index 00000000000..8ff28785289
--- /dev/null
+++ b/etcd/mvcc/over_watchable_store_txn.go
@@ -0,0 +1,55 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/pkg/traceutil"
+)
+
+func (tw *watchableStoreTxnWrite) End() {
+ changes := tw.Changes()
+ if len(changes) == 0 {
+ tw.TxnWrite.End()
+ return
+ }
+
+ rev := tw.Rev() + 1
+ evs := make([]mvccpb.Event, len(changes))
+ for i, change := range changes {
+ evs[i].Kv = &changes[i]
+ if change.CreateRevision == 0 {
+ evs[i].Type = mvccpb.DELETE
+ evs[i].Kv.ModRevision = rev
+ } else {
+ evs[i].Type = mvccpb.PUT
+ }
+ }
+
+	// end the write txn under the watchable store lock so the updates are visible when asynchronous event posting checks the current store revision
+	tw.s.mu.Lock()
+	tw.s.notify(rev, evs) // notify the watchers as the txn ends
+ tw.TxnWrite.End()
+ tw.s.mu.Unlock()
+}
+
+type watchableStoreTxnWrite struct {
+ TxnWrite
+ s *watchableStore
+}
+
+func (s *watchableStore) Write(trace *traceutil.Trace) TxnWrite {
+ return &watchableStoreTxnWrite{s.store.Write(trace), s}
+}
diff --git a/etcd/mvcc/over_watcher.go b/etcd/mvcc/over_watcher.go
new file mode 100644
index 00000000000..a4630a734af
--- /dev/null
+++ b/etcd/mvcc/over_watcher.go
@@ -0,0 +1,179 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "bytes"
+ "errors"
+ "sync"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+)
+
+// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+// user-provided ID is available. If passed, an ID will automatically be assigned.
+const AutoWatchID WatchID = 0
+
+var (
+ ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
+ ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty")
+	ErrWatcherDuplicateID = errors.New("mvcc: duplicate watch ID provided on a WatchStream")
+)
+
+type WatchID int64
+
+// FilterFunc returns true if the given event should be filtered out.
+type FilterFunc func(e mvccpb.Event) bool
+
+type WatchStream interface {
+	// Watch creates a watcher: id 0 means auto-assign, [key, end) is the watched range,
+	// startRev is the revision to start watching from, and fcs are event filters.
+	Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error)
+
+	Chan() <-chan WatchResponse // every watch response on this stream is delivered through the returned channel
+
+ // RequestProgress requests the progress of the watcher with given ID. The response
+ // will only be sent if the watcher is currently synced.
+	// The responses will be sent through the WatchResponse Chan attached
+ // with this stream to ensure correct ordering.
+ // The responses contains no events. The revision in the response is the progress
+ // of the watchers since the watcher is currently synced.
+ RequestProgress(id WatchID)
+
+ // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be
+ // returned.
+ Cancel(id WatchID) error
+
+ // Close closes Chan and release all related resources.
+ Close()
+	Rev() int64 // returns the current revision of the KV the stream watches on
+}
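+
+// Illustrative usage sketch (an assumption for exposition, not part of upstream etcd): a typical WatchStream
+// lifecycle. `watchableKV` stands for any value returned by mvcc.New().
+//
+//	ws := watchableKV.NewWatchStream()
+//	id, err := ws.Watch(AutoWatchID, []byte("foo"), nil, 0) // watch a single key starting from the next revision
+//	if err == nil {
+//		go func() {
+//			for resp := range ws.Chan() { // every watcher on this stream shares one channel
+//				_ = resp.Events // PUT/DELETE events for watcher id
+//			}
+//		}()
+//	}
+//	_ = ws.Cancel(id) // stop this watcher; Close() would stop the whole stream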
+
+type WatchResponse struct {
+ // WatchID is the WatchID of the watcher this response sent to.
+ WatchID WatchID
+
+ // Events contains all the events that needs to send.
+ Events []mvccpb.Event
+
+ // Revision is the revision of the KV when the watchResponse is Created.
+ // For a normal response, the revision should be the same as the last
+ // Modified revision inside Events. For a delayed response to a unsynced
+ // watcher, the revision is greater than the last Modified revision
+ // inside Events.
+ Revision int64
+
+ // CompactRevision is set when the watcher is cancelled due to compaction.
+ CompactRevision int64
+}
+
+// watchStream contains a collection of watchers that share
+// one streaming chan to send out watched events and other control events,
+// along with the per-watcher bookkeeping.
+type watchStream struct {
+ watchable watchable
+	ch        chan WatchResponse // channel used to deliver watch responses
+	mu        sync.Mutex         // guards fields below it
+	nextID    WatchID            // ID pre-allocated for the next new watcher on this stream; starts at 0
+	closed    bool
+	cancels   map[WatchID]cancelFunc // used to cancel a specific watcher
+	watchers  map[WatchID]*watcher   // watchers on this stream, indexed by their ID
+}
+
+// Watch creates a watcher on this stream and returns its WatchID.
+func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) {
+	// reject bad ranges where key >= end lexicographically;
+	// watch requests with 'WithFromKey' have an empty-byte range end
+ if len(end) != 0 && bytes.Compare(key, end) != -1 {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ if ws.closed {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ if id == AutoWatchID {
+		// nextID starts at 0; scan forward to the next unused ID and hand it out
+ for ws.watchers[ws.nextID] != nil {
+ ws.nextID++
+ }
+ id = ws.nextID
+ ws.nextID++
+ } else if _, ok := ws.watchers[id]; ok {
+ return -1, ErrWatcherDuplicateID
+ }
+
+ w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
+	ws.cancels[id] = c  // callback used later to remove the watcher
+	ws.watchers[id] = w // record the watcher under its ID
+ return id, nil
+}
+
+func (ws *watchStream) Chan() <-chan WatchResponse {
+ return ws.ch
+}
+
+func (ws *watchStream) Cancel(id WatchID) error {
+ ws.mu.Lock()
+ cancel, ok := ws.cancels[id]
+ w := ws.watchers[id]
+ ok = ok && !ws.closed
+ ws.mu.Unlock()
+
+ if !ok {
+ return ErrWatcherNotExist
+ }
+ cancel()
+
+ ws.mu.Lock()
+ // The watch isn't removed until cancel so that if Close() is called,
+ // it will wait for the cancel. Otherwise, Close() could close the
+ // watch channel while the store is still posting events.
+ if ww := ws.watchers[id]; ww == w {
+ delete(ws.cancels, id)
+ delete(ws.watchers, id)
+ }
+ ws.mu.Unlock()
+
+ return nil
+}
+
+func (ws *watchStream) Close() {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+
+ for _, cancel := range ws.cancels {
+ cancel()
+ }
+ ws.closed = true
+ close(ws.ch)
+}
+
+func (ws *watchStream) Rev() int64 {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ return ws.watchable.rev()
+}
+
+func (ws *watchStream) RequestProgress(id WatchID) {
+ ws.mu.Lock()
+ w, ok := ws.watchers[id]
+ ws.mu.Unlock()
+ if !ok {
+ return
+ }
+ ws.watchable.progress(w)
+}
diff --git a/etcd/mvcc/over_watcher_group.go b/etcd/mvcc/over_watcher_group.go
new file mode 100644
index 00000000000..5f3c352e45a
--- /dev/null
+++ b/etcd/mvcc/over_watcher_group.go
@@ -0,0 +1,293 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/pkg/adt"
+)
+
+// watchBatchMaxRevs is the maximum distinct revisions that
+// may be sent to an unsynced watcher at a time. Declared as
+// var instead of const for testing purposes.
+var watchBatchMaxRevs = 1000
+
+type eventBatch struct {
+ // evs is a batch of revision-ordered events
+ evs []mvccpb.Event
+ // revs is the minimum unique revisions observed for this batch
+ revs int
+ // moreRev is first revision with more events following this batch
+ moreRev int64
+}
+
+// OK
+func (eb *eventBatch) add(ev mvccpb.Event) {
+ if eb.revs > watchBatchMaxRevs {
+ // maxed out batch size
+ return
+ }
+
+ if len(eb.evs) == 0 {
+ // base case
+ eb.revs = 1
+ eb.evs = append(eb.evs, ev)
+ return
+ }
+
+ // revision accounting
+ ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision
+ evRev := ev.Kv.ModRevision
+ if evRev > ebRev {
+ eb.revs++
+ if eb.revs > watchBatchMaxRevs {
+ eb.moreRev = evRev
+ return
+ }
+ }
+
+ eb.evs = append(eb.evs, ev)
+}
+
+type watcherBatch map[*watcher]*eventBatch // per-watcher batch of events waiting to be delivered
+
+// add appends an event to the watcher's pending batch.
+func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
+ eb := wb[w]
+ if eb == nil {
+ eb = &eventBatch{}
+ wb[w] = eb
+ }
+ eb.add(ev)
+}
+
+// newWatcherBatch matches a batch of events against the watchers in the watcherGroup so the events can be sent out.
+func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
+	if len(wg.watchers) == 0 { // no watchers
+ return nil
+ }
+	wb := make(watcherBatch) // per-watcher batches of events to send
+ for _, ev := range evs {
+ for w := range wg.watcherSetByKey(ev.Kv.Key) {
+ if ev.Kv.ModRevision >= w.minRev {
+				// don't double-notify
+ wb.add(w, ev)
+ }
+ }
+ }
+ return wb
+}
+
+type watcherSet map[*watcher]struct{}
+
+func (w watcherSet) add(wa *watcher) {
+ if _, ok := w[wa]; ok {
+		panic("added the same watcher twice!")
+ }
+ w[wa] = struct{}{}
+}
+
+// union merges the watchers from ws into w.
+func (w watcherSet) union(ws watcherSet) {
+ for wa := range ws {
+ w.add(wa)
+ }
+}
+
+func (w watcherSet) delete(wa *watcher) {
+ if _, ok := w[wa]; !ok {
+		panic("removing a watcher that is already missing!")
+ }
+ delete(w, wa)
+}
+
+type watcherSetByKey map[string]watcherSet // watchers indexed by the single key they watch
+
+func (w watcherSetByKey) add(wa *watcher) {
+ set := w[wa.key]
+ if set == nil {
+ set = make(watcherSet)
+ w[wa.key] = set
+ }
+ set.add(wa)
+}
+
+func (w watcherSetByKey) delete(wa *watcher) bool {
+ k := wa.key
+ if v, ok := w[k]; ok {
+ if _, ok := v[wa]; ok {
+ delete(v, wa)
+ if len(v) == 0 {
+ // remove the set; nothing left
+ delete(w, k)
+ }
+ return true
+ }
+ }
+ return false
+}
+
+// watcherGroup is a collection of watchers.
+type watcherGroup struct {
+	keyWatchers watcherSetByKey  // watchers that watch a single key
+	ranges      adt.IntervalTree // interval tree (a red-black tree keyed by interval) holding the range watchers
+ watchers watcherSet
+}
+
+// newWatcherGroup creates an empty group; groups are used for both the synced and the unsynced watchers.
+func newWatcherGroup() watcherGroup {
+ return watcherGroup{
+ keyWatchers: make(watcherSetByKey),
+ ranges: adt.NewIntervalTree(),
+		watchers:    make(watcherSet), // set of all watchers in the group
+ }
+}
+
+// add registers a watcher with the group.
+func (wg *watcherGroup) add(wa *watcher) {
+ wg.watchers.add(wa)
+ if wa.end == "" || len(wa.end) == 0 {
+ wg.keyWatchers.add(wa)
+ return
+ }
+	// range watcher:
+	// is the interval already registered?
+	// the interval tree stores the watched key ranges
+ ivl := adt.NewStringAffineInterval(wa.key, wa.end)
+ if iv := wg.ranges.Find(ivl); iv != nil {
+ iv.Val.(watcherSet).add(wa)
+ return
+ }
+
+ ws := make(watcherSet)
+ ws.add(wa)
+ wg.ranges.Insert(ivl, ws)
+}
+
+// contains reports whether some watcher in the group watches the given key.
+func (wg *watcherGroup) contains(key string) bool {
+ _, ok := wg.keyWatchers[key]
+	return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) // or some watched range overlaps the key
+}
+
+// size returns the number of watchers in the group.
+func (wg *watcherGroup) size() int { return len(wg.watchers) }
+
+// delete removes a watcher from the group.
+func (wg *watcherGroup) delete(wa *watcher) bool {
+ if _, ok := wg.watchers[wa]; !ok {
+ return false
+ }
+ wg.watchers.delete(wa)
+ if wa.end == "" || len(wa.end) == 0 {
+ wg.keyWatchers.delete(wa)
+ return true
+ }
+
+ ivl := adt.NewStringAffineInterval(wa.key, wa.end)
+ iv := wg.ranges.Find(ivl)
+ if iv == nil {
+ return false
+ }
+
+ ws := iv.Val.(watcherSet)
+ delete(ws, wa)
+ if len(ws) == 0 {
+ // remove interval missing watchers
+ if ok := wg.ranges.Delete(ivl); !ok {
+ panic("could not remove watcher from interval tree")
+ }
+ }
+
+ return true
+}
+
+// choose selects watchers from the watcher group to update
+func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
+ if len(wg.watchers) < maxWatchers {
+ return wg, wg.chooseAll(curRev, compactRev)
+ }
+ ret := newWatcherGroup()
+ for w := range wg.watchers {
+ if maxWatchers <= 0 {
+ break
+ }
+ maxWatchers--
+ ret.add(w)
+ }
+ return &ret, ret.chooseAll(curRev, compactRev)
+}
+
+func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
+ minRev := int64(math.MaxInt64)
+ for w := range wg.watchers {
+ if w.minRev > curRev {
+ // after network partition, possibly choosing future revision watcher from restore operation
+ // with watch Key "proxy-namespace__lostleader" and revision "math.MaxInt64 - 2"
+ // do not panic when such watcher had been moved from "synced" watcher during restore operation
+ if !w.restore {
+ panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev))
+ }
+
+ // mark 'restore' done, since it's chosen
+ w.restore = false
+ }
+ if w.minRev < compactRev {
+ select {
+ case w.ch <- WatchResponse{
+ WatchID: w.id,
+ CompactRevision: compactRev,
+ }:
+ w.compacted = true
+ wg.delete(w)
+ default:
+ // retry next time
+ }
+ continue
+ }
+ if minRev > w.minRev {
+ minRev = w.minRev
+ }
+ }
+ return minRev
+}
+
+// watcherSetByKey gets the set of watchers that receive events on the given Key.
+func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
+ wkeys := wg.keyWatchers[key]
+ wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
+
+ // zero-copy cases
+ switch {
+ case len(wranges) == 0:
+ // no need to merge ranges or copy; reuse single-Key set
+ return wkeys
+ case len(wranges) == 0 && len(wkeys) == 0:
+ return nil
+ case len(wranges) == 1 && len(wkeys) == 0:
+ return wranges[0].Val.(watcherSet)
+ }
+
+ // copy case
+ ret := make(watcherSet)
+ ret.union(wg.keyWatchers[key])
+ for _, item := range wranges {
+ ret.union(item.Val.(watcherSet))
+ }
+ return ret
+}
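+
+// Illustrative sketch (an assumption for exposition, not part of upstream etcd): how the group splits
+// single-key and range watchers between the keyWatchers map and the interval tree.
+//
+//	wg := newWatcherGroup()
+//	wg.add(&watcher{key: "foo"})         // end == "" -> stored in keyWatchers["foo"]
+//	wg.add(&watcher{key: "a", end: "b"}) // range watcher -> stored under the interval [a, b) in ranges
+//	_ = wg.contains("foo")               // true: exact single-key match
+//	_ = wg.contains("applesauce")        // true: the key falls inside the [a, b) interval
+//	_ = wg.contains("zzz")               // false: no single-key or range watcher matches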
diff --git a/server/proxy/grpcproxy/adapter/auth_client_adapter.go b/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
similarity index 98%
rename from server/proxy/grpcproxy/adapter/auth_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
index 140212b9620..912c04a54d4 100644
--- a/server/proxy/grpcproxy/adapter/auth_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
@@ -17,7 +17,7 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
grpc "google.golang.org/grpc"
)
diff --git a/server/proxy/grpcproxy/adapter/chan_stream.go b/etcd/proxy/grpcproxy/adapter/chan_stream.go
similarity index 98%
rename from server/proxy/grpcproxy/adapter/chan_stream.go
rename to etcd/proxy/grpcproxy/adapter/chan_stream.go
index 1af514b1fdd..f202879c1cf 100644
--- a/server/proxy/grpcproxy/adapter/chan_stream.go
+++ b/etcd/proxy/grpcproxy/adapter/chan_stream.go
@@ -140,7 +140,7 @@ func (s *chanStream) RecvMsg(m interface{}) error {
}
func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream {
- // ch1 is buffered so server can send error on close
+ // ch1 is buffered so etcd can send error on close
ch1, ch2 := make(chan interface{}, 1), make(chan interface{})
headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)
diff --git a/server/proxy/grpcproxy/adapter/cluster_client_adapter.go b/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
similarity index 96%
rename from server/proxy/grpcproxy/adapter/cluster_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
index c1fff054de4..54a5040085a 100644
--- a/server/proxy/grpcproxy/adapter/cluster_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
@@ -17,7 +17,7 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"google.golang.org/grpc"
)
diff --git a/etcd/proxy/grpcproxy/adapter/doc.go b/etcd/proxy/grpcproxy/adapter/doc.go
new file mode 100644
index 00000000000..e6fd2c9ca8c
--- /dev/null
+++ b/etcd/proxy/grpcproxy/adapter/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package adapter provides gRPC adapters between client and etcd
+// gRPC interfaces without needing to go through a gRPC connection.
+package adapter
diff --git a/server/proxy/grpcproxy/adapter/election_client_adapter.go b/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
similarity index 97%
rename from server/proxy/grpcproxy/adapter/election_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/election_client_adapter.go
index 81d7434474a..f8a17132512 100644
--- a/server/proxy/grpcproxy/adapter/election_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
@@ -17,7 +17,7 @@ package adapter
import (
"context"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb"
"google.golang.org/grpc"
)
@@ -60,6 +60,7 @@ type es2ecServerStream struct{ chanServerStream }
func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error {
return s.SendMsg(rr)
}
+
func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
@@ -71,6 +72,7 @@ func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error {
return s.SendMsg(rr)
}
+
func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
diff --git a/server/proxy/grpcproxy/adapter/kv_client_adapter.go b/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
similarity index 96%
rename from server/proxy/grpcproxy/adapter/kv_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
index ddb6ada4732..f5ff992b7d9 100644
--- a/server/proxy/grpcproxy/adapter/kv_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
@@ -17,7 +17,7 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
grpc "google.golang.org/grpc"
)
diff --git a/server/proxy/grpcproxy/adapter/lease_client_adapter.go b/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
similarity index 97%
rename from server/proxy/grpcproxy/adapter/lease_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
index 6640d1d39e3..7292966b4ad 100644
--- a/server/proxy/grpcproxy/adapter/lease_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
@@ -17,7 +17,7 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"google.golang.org/grpc"
)
@@ -62,6 +62,7 @@ type ls2lcServerStream struct{ chanServerStream }
func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error {
return s.SendMsg(rr)
}
+
func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
@@ -73,6 +74,7 @@ func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error {
return s.SendMsg(rr)
}
+
func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
diff --git a/server/proxy/grpcproxy/adapter/lock_client_adapter.go b/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
similarity index 94%
rename from server/proxy/grpcproxy/adapter/lock_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
index a3ceaf26dae..8e1187adba0 100644
--- a/server/proxy/grpcproxy/adapter/lock_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
@@ -17,7 +17,7 @@ package adapter
import (
"context"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb"
"google.golang.org/grpc"
)
diff --git a/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
similarity index 98%
rename from server/proxy/grpcproxy/adapter/maintenance_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
index 6369a16d8b4..02c57b76a40 100644
--- a/server/proxy/grpcproxy/adapter/maintenance_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
@@ -17,7 +17,7 @@ package adapter
import (
"context"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"google.golang.org/grpc"
)
@@ -72,6 +72,7 @@ type ss2scServerStream struct{ chanServerStream }
func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error {
return s.SendMsg(rr)
}
+
func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
@@ -83,6 +84,7 @@ func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error {
return s.SendMsg(rr)
}
+
func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
diff --git a/server/proxy/grpcproxy/adapter/watch_client_adapter.go b/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
similarity index 97%
rename from server/proxy/grpcproxy/adapter/watch_client_adapter.go
rename to etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
index 2a93e29e01c..5719942d1e5 100644
--- a/server/proxy/grpcproxy/adapter/watch_client_adapter.go
+++ b/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
@@ -18,9 +18,8 @@ import (
"context"
"errors"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"google.golang.org/grpc"
-
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
)
var errAlreadySentHeader = errors.New("adapter: already sent header")
@@ -47,6 +46,7 @@ type ws2wcServerStream struct{ chanServerStream }
func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error {
return s.SendMsg(wr)
}
+
func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
@@ -58,6 +58,7 @@ func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error {
return s.SendMsg(wr)
}
+
func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) {
var v interface{}
if err := s.RecvMsg(&v); err != nil {
diff --git a/etcd/proxy/grpcproxy/auth.go b/etcd/proxy/grpcproxy/auth.go
new file mode 100644
index 00000000000..7861b3d2052
--- /dev/null
+++ b/etcd/proxy/grpcproxy/auth.go
@@ -0,0 +1,116 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type AuthProxy struct {
+ client *clientv3.Client
+}
+
+func NewAuthProxy(c *clientv3.Client) pb.AuthServer {
+ return &AuthProxy{client: c}
+}
+
+func (ap *AuthProxy) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).AuthEnable(ctx, r)
+}
+
+func (ap *AuthProxy) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).AuthDisable(ctx, r)
+}
+
+func (ap *AuthProxy) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).AuthStatus(ctx, r)
+}
+
+func (ap *AuthProxy) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).Authenticate(ctx, r)
+}
+
+func (ap *AuthProxy) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).RoleAdd(ctx, r)
+}
+
+func (ap *AuthProxy) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).RoleDelete(ctx, r)
+}
+
+func (ap *AuthProxy) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).RoleGet(ctx, r)
+}
+
+func (ap *AuthProxy) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).RoleList(ctx, r)
+}
+
+func (ap *AuthProxy) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).RoleRevokePermission(ctx, r)
+}
+
+func (ap *AuthProxy) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).RoleGrantPermission(ctx, r)
+}
+
+func (ap *AuthProxy) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).UserAdd(ctx, r)
+}
+
+func (ap *AuthProxy) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).UserDelete(ctx, r)
+}
+
+func (ap *AuthProxy) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).UserGet(ctx, r)
+}
+
+func (ap *AuthProxy) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).UserList(ctx, r)
+}
+
+func (ap *AuthProxy) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).UserGrantRole(ctx, r)
+}
+
+func (ap *AuthProxy) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).UserRevokeRole(ctx, r)
+}
+
+func (ap *AuthProxy) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ conn := ap.client.ActiveConnection()
+ return pb.NewAuthClient(conn).UserChangePassword(ctx, r)
+}
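The AuthProxy above is a pure pass-through: each RPC grabs the client's current active connection and forwards the request unchanged. A rough wiring sketch, assuming this fork's import paths and the generated pb.RegisterAuthServer helper (not shown in this diff); listener addresses are illustrative:

```go
package main

import (
	"log"
	"net"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
	"github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy"
	pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
	"google.golang.org/grpc"
)

func main() {
	// Backend etcd member the proxy forwards to.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Expose the pass-through Auth service on the proxy's own listener.
	srv := grpc.NewServer()
	pb.RegisterAuthServer(srv, grpcproxy.NewAuthProxy(cli))

	l, err := net.Listen("tcp", "127.0.0.1:23790")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.Serve(l))
}
```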
diff --git a/etcd/proxy/grpcproxy/cache/store.go b/etcd/proxy/grpcproxy/cache/store.go
new file mode 100644
index 00000000000..606ad4e3a81
--- /dev/null
+++ b/etcd/proxy/grpcproxy/cache/store.go
@@ -0,0 +1,171 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cache exports functionality for efficiently caching and mapping
+// `RangeRequest`s to corresponding `RangeResponse`s.
+package cache
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/golang/groupcache/lru"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/adt"
+)
+
+var (
+ DefaultMaxEntries = 2048
+ ErrCompacted = rpctypes.ErrGRPCCompacted
+)
+
+type Cache interface {
+ Add(req *pb.RangeRequest, resp *pb.RangeResponse)
+ Get(req *pb.RangeRequest) (*pb.RangeResponse, error)
+ Compact(revision int64)
+ Invalidate(key []byte, endkey []byte)
+ Size() int
+ Close()
+}
+
+// keyFunc returns the key of a request, which is used to look up its caching response in the cache.
+func keyFunc(req *pb.RangeRequest) string {
+ b, err := req.Marshal()
+ if err != nil {
+ panic(err)
+ }
+ return string(b)
+}
+
+func NewCache(maxCacheEntries int) Cache {
+ return &cache{
+ lru: lru.New(maxCacheEntries),
+ cachedRanges: adt.NewIntervalTree(),
+ compactedRev: -1,
+ }
+}
+
+func (c *cache) Close() {}
+
+// cache implements Cache
+type cache struct {
+ mu sync.RWMutex
+ lru *lru.Cache
+
+ // a reverse index for cache invalidation
+ cachedRanges adt.IntervalTree
+
+ compactedRev int64
+}
+
+// Add adds the response of a request to the cache if its revision is larger than the compacted revision of the cache.
+func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) {
+ key := keyFunc(req)
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if req.Revision > c.compactedRev {
+ c.lru.Add(key, resp)
+ }
+ // we do not need to invalidate a request with a revision specified.
+ // so we do not need to add it into the reverse index.
+ if req.Revision != 0 {
+ return
+ }
+
+ var (
+ iv *adt.IntervalValue
+ ivl adt.Interval
+ )
+ if len(req.RangeEnd) != 0 {
+ ivl = adt.NewStringAffineInterval(string(req.Key), string(req.RangeEnd))
+ } else {
+ ivl = adt.NewStringAffinePoint(string(req.Key))
+ }
+
+ iv = c.cachedRanges.Find(ivl)
+
+ if iv == nil {
+ val := map[string]struct{}{key: {}}
+ c.cachedRanges.Insert(ivl, val)
+ } else {
+ val := iv.Val.(map[string]struct{})
+ val[key] = struct{}{}
+ iv.Val = val
+ }
+}
+
+// Get looks up the caching response for a given request.
+// Get is also responsible for lazy eviction when accessing compacted entries.
+func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) {
+ key := keyFunc(req)
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if req.Revision > 0 && req.Revision < c.compactedRev {
+ c.lru.Remove(key)
+ return nil, ErrCompacted
+ }
+
+ if resp, ok := c.lru.Get(key); ok {
+ return resp.(*pb.RangeResponse), nil
+ }
+ return nil, errors.New("not exist")
+}
+
+// Invalidate invalidates the cache entries that intersect with the given range from key to endkey.
+func (c *cache) Invalidate(key, endkey []byte) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ var (
+ ivs []*adt.IntervalValue
+ ivl adt.Interval
+ )
+ if len(endkey) == 0 {
+ ivl = adt.NewStringAffinePoint(string(key))
+ } else {
+ ivl = adt.NewStringAffineInterval(string(key), string(endkey))
+ }
+
+ ivs = c.cachedRanges.Stab(ivl)
+ for _, iv := range ivs {
+ keys := iv.Val.(map[string]struct{})
+ for key := range keys {
+ c.lru.Remove(key)
+ }
+ }
+ // delete after removing all keys since it is destructive to 'ivs'
+ c.cachedRanges.Delete(ivl)
+}
+
+// Compact invalidates all cached responses with revisions older than the given rev.
+// The invalidation is lazy: the actual removal happens when an entry is accessed.
+func (c *cache) Compact(revision int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if revision > c.compactedRev {
+ c.compactedRev = revision
+ }
+}
+
+func (c *cache) Size() int {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.lru.Len()
+}
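The cache above keys each entry by the marshaled RangeRequest, keeps a reverse interval index for revision-0 requests so later writes can invalidate them, and treats Compact as a lazy watermark that only evicts on the next Get. A small usage sketch, assuming (consistently with the rest of this diff) that the fork's RangeRequest uses string-typed Key/RangeEnd fields:

```go
package main

import (
	"fmt"

	"github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy/cache"
	pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)

func main() {
	c := cache.NewCache(cache.DefaultMaxEntries)
	defer c.Close()

	// A revision-0 ("latest") request is also indexed by key interval so that
	// later writes can invalidate it.
	req := &pb.RangeRequest{Key: "foo", Serializable: true}
	c.Add(req, &pb.RangeResponse{Count: 1})

	if resp, err := c.Get(req); err == nil {
		fmt.Println("hit, count =", resp.Count)
	}

	// A write to "foo" drops the cached range via the reverse index.
	c.Invalidate([]byte("foo"), nil)
	if _, err := c.Get(req); err != nil {
		fmt.Println("miss after invalidation:", err)
	}
}
```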
diff --git a/etcd/proxy/grpcproxy/cluster.go b/etcd/proxy/grpcproxy/cluster.go
new file mode 100644
index 00000000000..0baca27a4f0
--- /dev/null
+++ b/etcd/proxy/grpcproxy/cluster.go
@@ -0,0 +1,214 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "golang.org/x/time/rate"
+
+ "go.uber.org/zap"
+)
+
+// allow maximum 1 retry per second
+const resolveRetryRate = 1
+
+type clusterProxy struct {
+ lg *zap.Logger
+ clus clientv3.Cluster
+ ctx context.Context
+
+ // advertise client URL
+ advaddr string
+ prefix string
+
+ em endpoints.Manager
+
+ umu sync.RWMutex
+ umap map[string]endpoints.Endpoint
+}
+
+// NewClusterProxy takes an optional prefix used to fetch grpc-proxy member endpoints.
+// The returned channel is closed when grpc-proxy endpoint registration is enabled
+// and the client's context is canceled, so the 'register' loop returns.
+// TODO: Expand the API to report creation errors
+func NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix string) (pb.ClusterServer, <-chan struct{}) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ var em endpoints.Manager
+ if advaddr != "" && prefix != "" {
+ var err error
+ if em, err = endpoints.NewManager(c, prefix); err != nil {
+ lg.Error("failed to provision endpointsManager", zap.String("prefix", prefix), zap.Error(err))
+ return nil, nil
+ }
+ }
+
+ cp := &clusterProxy{
+ lg: lg,
+ clus: c.Cluster,
+ ctx: c.Ctx(),
+
+ advaddr: advaddr,
+ prefix: prefix,
+ umap: make(map[string]endpoints.Endpoint),
+ em: em,
+ }
+
+ donec := make(chan struct{})
+ if em != nil {
+ go func() {
+ defer close(donec)
+ cp.establishEndpointWatch(prefix)
+ }()
+ return cp, donec
+ }
+
+ close(donec)
+ return cp, donec
+}
+
+func (cp *clusterProxy) establishEndpointWatch(prefix string) {
+ rm := rate.NewLimiter(rate.Limit(resolveRetryRate), resolveRetryRate)
+ for rm.Wait(cp.ctx) == nil {
+ wc, err := cp.em.NewWatchChannel(cp.ctx)
+ if err != nil {
+ cp.lg.Warn("failed to establish endpoint watch", zap.String("prefix", prefix), zap.Error(err))
+ continue
+ }
+ cp.monitor(wc)
+ }
+}
+
+func (cp *clusterProxy) monitor(wa endpoints.WatchChannel) {
+ for {
+ select {
+ case <-cp.ctx.Done():
+ cp.lg.Info("watching endpoints interrupted", zap.Error(cp.ctx.Err()))
+ return
+ case updates := <-wa:
+ cp.umu.Lock()
+ for _, up := range updates {
+ switch up.Op {
+ case endpoints.Add:
+ cp.umap[up.Endpoint.Addr] = up.Endpoint
+ case endpoints.Delete:
+ delete(cp.umap, up.Endpoint.Addr)
+ }
+ }
+ cp.umu.Unlock()
+ }
+ }
+}
+
+func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
+ if r.IsLearner {
+ return cp.memberAddAsLearner(ctx, r.PeerURLs)
+ }
+ return cp.memberAdd(ctx, r.PeerURLs)
+}
+
+func (cp *clusterProxy) memberAdd(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) {
+ mresp, err := cp.clus.MemberAdd(ctx, peerURLs)
+ if err != nil {
+ return nil, err
+ }
+ resp := (pb.MemberAddResponse)(*mresp)
+ return &resp, err
+}
+
+func (cp *clusterProxy) memberAddAsLearner(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) {
+ mresp, err := cp.clus.MemberAddAsLearner(ctx, peerURLs)
+ if err != nil {
+ return nil, err
+ }
+ resp := (pb.MemberAddResponse)(*mresp)
+ return &resp, err
+}
+
+func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
+ mresp, err := cp.clus.MemberRemove(ctx, r.ID)
+ if err != nil {
+ return nil, err
+ }
+ resp := (pb.MemberRemoveResponse)(*mresp)
+ return &resp, err
+}
+
+func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
+ mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs)
+ if err != nil {
+ return nil, err
+ }
+ resp := (pb.MemberUpdateResponse)(*mresp)
+ return &resp, err
+}
+
+func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) {
+ cp.umu.RLock()
+ defer cp.umu.RUnlock()
+ mbs := make([]*pb.Member, 0, len(cp.umap))
+ for addr, upt := range cp.umap {
+ m, err := decodeMeta(fmt.Sprint(upt.Metadata))
+ if err != nil {
+ return nil, err
+ }
+ mbs = append(mbs, &pb.Member{Name: m.Name, ClientURLs: []string{addr}})
+ }
+ return mbs, nil
+}
+
+// MemberList wraps member list API with following rules:
+// - If 'advaddr' is not empty and 'prefix' is not empty, return registered member lists via resolver
+// - If 'advaddr' is not empty and 'prefix' is not empty and registered grpc-proxy members haven't been fetched, return the 'advaddr'
+// - If 'advaddr' is not empty and 'prefix' is empty, return 'advaddr' without forcing it to 'register'
+// - If 'advaddr' is empty, forward to member list API
+func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
+ if cp.advaddr != "" {
+ if cp.prefix != "" {
+ mbs, err := cp.membersFromUpdates()
+ if err != nil {
+ return nil, err
+ }
+ if len(mbs) > 0 {
+ return &pb.MemberListResponse{Members: mbs}, nil
+ }
+ }
+ // prefix is empty or no grpc-proxy members have been registered yet
+ hostname, _ := os.Hostname()
+ return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil
+ }
+ mresp, err := cp.clus.MemberList(ctx)
+ if err != nil {
+ return nil, err
+ }
+ resp := (pb.MemberListResponse)(*mresp)
+ return &resp, err
+}
+
+func (cp *clusterProxy) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) {
+ // TODO: implement
+ return nil, errors.New("not implemented")
+}
diff --git a/server/proxy/grpcproxy/doc.go b/etcd/proxy/grpcproxy/doc.go
similarity index 100%
rename from server/proxy/grpcproxy/doc.go
rename to etcd/proxy/grpcproxy/doc.go
diff --git a/etcd/proxy/grpcproxy/election.go b/etcd/proxy/grpcproxy/election.go
new file mode 100644
index 00000000000..5a35019fa18
--- /dev/null
+++ b/etcd/proxy/grpcproxy/election.go
@@ -0,0 +1,66 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3election/v3electionpb"
+)
+
+type electionProxy struct {
+ client *clientv3.Client
+}
+
+func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer {
+ return &electionProxy{client: client}
+}
+
+func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) {
+ return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req)
+}
+
+func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) {
+ return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req)
+}
+
+func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) {
+ return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req)
+}
+
+func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error {
+ conn := ep.client.ActiveConnection()
+ ctx, cancel := context.WithCancel(s.Context())
+ defer cancel()
+ sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req)
+ if err != nil {
+ return err
+ }
+ for {
+ rr, err := sc.Recv()
+ if err != nil {
+ return err
+ }
+ if err = s.Send(rr); err != nil {
+ return err
+ }
+ }
+}
+
+func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) {
+ return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req)
+}
diff --git a/etcd/proxy/grpcproxy/health.go b/etcd/proxy/grpcproxy/health.go
new file mode 100644
index 00000000000..b1e6081c754
--- /dev/null
+++ b/etcd/proxy/grpcproxy/health.go
@@ -0,0 +1,78 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/etcdhttp"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ "go.uber.org/zap"
+)
+
+// HandleHealth registers health handler on '/health'.
+func HandleHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkHealth(c) }))
+}
+
+// HandleProxyHealth registers health handler on '/proxy/health'.
+func HandleProxyHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ mux.Handle(etcdhttp.PathProxyHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkProxyHealth(c) }))
+}
+
+func checkHealth(c *clientv3.Client) etcdhttp.Health {
+ h := etcdhttp.Health{Health: "false"}
+ ctx, cancel := context.WithTimeout(c.Ctx(), time.Second)
+ _, err := c.Get(ctx, "a")
+ cancel()
+ if err == nil || err == rpctypes.ErrPermissionDenied {
+ h.Health = "true"
+ } else {
+ h.Reason = fmt.Sprintf("GET ERROR:%s", err)
+ }
+ return h
+}
+
+func checkProxyHealth(c *clientv3.Client) etcdhttp.Health {
+ if c == nil {
+ return etcdhttp.Health{Health: "false", Reason: "no connection to proxy"}
+ }
+ h := checkHealth(c)
+ if h.Health != "true" {
+ return h
+ }
+ ctx, cancel := context.WithTimeout(c.Ctx(), time.Second*3)
+ ch := c.Watch(ctx, "a", clientv3.WithCreatedNotify())
+ select {
+ case <-ch:
+ case <-ctx.Done():
+ h.Health = "false"
+ h.Reason = "WATCH TIMEOUT"
+ }
+ cancel()
+ return h
+}
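HandleHealth and HandleProxyHealth above only attach handlers to a mux; serving them is up to the caller. A minimal sketch of exposing both endpoints over HTTP (endpoint and listen address are illustrative):

```go
package main

import (
	"log"
	"net/http"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
	"github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy"
	"go.uber.org/zap"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	lg := zap.NewExample()
	mux := http.NewServeMux()
	grpcproxy.HandleHealth(lg, mux, cli)      // serves /health
	grpcproxy.HandleProxyHealth(lg, mux, cli) // serves /proxy/health
	log.Fatal(http.ListenAndServe("127.0.0.1:23791", mux))
}
```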
diff --git a/etcd/proxy/grpcproxy/kv.go b/etcd/proxy/grpcproxy/kv.go
new file mode 100644
index 00000000000..3a9cc94138b
--- /dev/null
+++ b/etcd/proxy/grpcproxy/kv.go
@@ -0,0 +1,234 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy/cache"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type kvProxy struct {
+ kv clientv3.KV
+ cache cache.Cache
+}
+
+func NewKvProxy(c *clientv3.Client) (pb.KVServer, <-chan struct{}) {
+ kv := &kvProxy{
+ kv: c.KV,
+ cache: cache.NewCache(cache.DefaultMaxEntries),
+ }
+ donec := make(chan struct{})
+ close(donec)
+ return kv, donec
+}
+
+func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ if r.Serializable {
+ resp, err := p.cache.Get(r)
+ switch err {
+ case nil:
+ return resp, nil
+ case cache.ErrCompacted:
+ return nil, err
+ }
+
+ }
+
+ resp, err := p.kv.Do(ctx, RangeRequestToOp(r))
+ if err != nil {
+ return nil, err
+ }
+
+ // cache linearizable as serializable
+ req := *r
+ req.Serializable = true
+ gresp := (*pb.RangeResponse)(resp.Get())
+ p.cache.Add(&req, gresp)
+
+ return gresp, nil
+}
+
+func (p *kvProxy) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ p.cache.Invalidate([]byte(r.Key), nil)
+
+ resp, err := p.kv.Do(ctx, PutRequestToOp(r))
+ return (*pb.PutResponse)(resp.Put()), err
+}
+
+func (p *kvProxy) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ p.cache.Invalidate([]byte(r.Key), []byte(r.RangeEnd))
+
+ resp, err := p.kv.Do(ctx, DelRequestToOp(r))
+ return (*pb.DeleteRangeResponse)(resp.Del()), err
+}
+
+func (p *kvProxy) txnToCache(reqs []*pb.RequestOp, resps []*pb.ResponseOp) {
+ for i := range resps {
+ if resps[i].ResponseOp_ResponsePut != nil {
+ p.cache.Invalidate([]byte(reqs[i].GetRequestPut().Key), nil)
+ }
+
+ if resps[i].ResponseOp_ResponseDeleteRange != nil {
+ rdr := reqs[i].GetRequestDeleteRange()
+ p.cache.Invalidate([]byte(rdr.Key), []byte(rdr.RangeEnd))
+ }
+ if resps[i].ResponseOp_ResponseRange != nil {
+ tv := resps[i].ResponseOp_ResponseRange
+ req := *(reqs[i].GetRequestRange())
+ req.Serializable = true
+ p.cache.Add(&req, tv.ResponseRange)
+ }
+
+ }
+}
+
+func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ op := TxnRequestToOp(r)
+ opResp, err := p.kv.Do(ctx, op)
+ if err != nil {
+ return nil, err
+ }
+ resp := opResp.Txn()
+
+ // txn may claim an outdated key is updated; be safe and invalidate
+ for _, cmp := range r.Compare {
+ p.cache.Invalidate([]byte(cmp.Key), []byte(cmp.RangeEnd))
+ }
+ // update any fetched keys
+ if resp.Succeeded {
+ p.txnToCache(r.Success, resp.Responses)
+ } else {
+ p.txnToCache(r.Failure, resp.Responses)
+ }
+
+ return (*pb.TxnResponse)(resp), nil
+}
+
+func (p *kvProxy) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+ var opts []clientv3.CompactOption
+ if r.Physical {
+ opts = append(opts, clientv3.WithCompactPhysical())
+ }
+
+ resp, err := p.kv.Compact(ctx, r.Revision, opts...)
+ if err == nil {
+ p.cache.Compact(r.Revision)
+ }
+
+ return (*pb.CompactionResponse)(resp), err
+}
+
+func requestOpToOp(union *pb.RequestOp) clientv3.Op {
+ if union.RequestOp_RequestRange != nil {
+ tv := union.RequestOp_RequestRange
+ if tv.RequestRange != nil {
+ return RangeRequestToOp(tv.RequestRange)
+ }
+ }
+ if union.RequestOp_RequestPut != nil {
+ tv := union.RequestOp_RequestPut
+ if tv.RequestPut != nil {
+ return PutRequestToOp(tv.RequestPut)
+ }
+ }
+ if union.RequestOp_RequestDeleteRange != nil {
+ tv := union.RequestOp_RequestDeleteRange
+ if tv.RequestDeleteRange != nil {
+ return DelRequestToOp(tv.RequestDeleteRange)
+ }
+ }
+ if union.RequestOp_RequestTxn != nil {
+ tv := union.RequestOp_RequestTxn
+ if tv.RequestTxn != nil {
+ return TxnRequestToOp(tv.RequestTxn)
+ }
+ }
+
+ panic("unknown request")
+}
+
+func RangeRequestToOp(r *pb.RangeRequest) clientv3.Op {
+ var opts []clientv3.OpOption
+ if len(r.RangeEnd) != 0 {
+ opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
+ }
+ opts = append(opts, clientv3.WithRev(r.Revision))
+ opts = append(opts, clientv3.WithLimit(r.Limit))
+ opts = append(opts, clientv3.WithSort(
+ clientv3.SortTarget(r.SortTarget),
+ clientv3.SortOrder(r.SortOrder)),
+ )
+ opts = append(opts, clientv3.WithMaxCreateRev(r.MaxCreateRevision))
+ opts = append(opts, clientv3.WithMinCreateRev(r.MinCreateRevision))
+ opts = append(opts, clientv3.WithMaxModRev(r.MaxModRevision))
+ opts = append(opts, clientv3.WithMinModRev(r.MinModRevision))
+ if r.CountOnly {
+ opts = append(opts, clientv3.WithCountOnly())
+ }
+ if r.KeysOnly {
+ opts = append(opts, clientv3.WithKeysOnly())
+ }
+ if r.Serializable {
+ opts = append(opts, clientv3.WithSerializable())
+ }
+
+ return clientv3.OpGet(string(r.Key), opts...)
+}
+
+func PutRequestToOp(r *pb.PutRequest) clientv3.Op {
+ opts := []clientv3.OpOption{}
+ opts = append(opts, clientv3.WithLease(clientv3.LeaseID(r.Lease)))
+ if r.IgnoreValue {
+ opts = append(opts, clientv3.WithIgnoreValue())
+ }
+ if r.IgnoreLease {
+ opts = append(opts, clientv3.WithIgnoreLease())
+ }
+ if r.PrevKv {
+ opts = append(opts, clientv3.WithPrevKV())
+ }
+ return clientv3.OpPut(string(r.Key), string(r.Value), opts...)
+}
+
+func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op {
+ opts := []clientv3.OpOption{}
+ if len(r.RangeEnd) != 0 {
+ opts = append(opts, clientv3.WithRange(string(r.RangeEnd)))
+ }
+ if r.PrevKv {
+ opts = append(opts, clientv3.WithPrevKV())
+ }
+ return clientv3.OpDelete(string(r.Key), opts...)
+}
+
+func TxnRequestToOp(r *pb.TxnRequest) clientv3.Op {
+ cmps := make([]clientv3.Cmp, len(r.Compare))
+ thenops := make([]clientv3.Op, len(r.Success))
+ elseops := make([]clientv3.Op, len(r.Failure))
+ for i := range r.Compare {
+ cmps[i] = (clientv3.Cmp)(*r.Compare[i])
+ }
+ for i := range r.Success {
+ thenops[i] = requestOpToOp(r.Success[i])
+ }
+ for i := range r.Failure {
+ elseops[i] = requestOpToOp(r.Failure[i])
+ }
+ return clientv3.OpTxn(cmps, thenops, elseops)
+}
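The kv proxy above answers serializable reads from its cache when possible and re-caches every successful Range under its serializable form, so a repeated serializable read of the same request can be served locally until a Put, DeleteRange, or Txn invalidates the key. A hedged usage sketch (endpoints illustrative, error handling trimmed, string-typed Key assumed as in the rest of this fork):

```go
package main

import (
	"context"
	"fmt"
	"log"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
	"github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy"
	pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kv, _ := grpcproxy.NewKvProxy(cli)
	req := &pb.RangeRequest{Key: "foo", Serializable: true}

	// First read goes to the backend; the response is cached under the
	// serializable form of the request.
	if resp, err := kv.Range(context.TODO(), req); err == nil {
		fmt.Println("kvs:", len(resp.Kvs))
	}
	// An identical serializable read can now be answered from the proxy cache,
	// until a Put/DeleteRange/Txn on "foo" invalidates the entry.
	if resp, err := kv.Range(context.TODO(), req); err == nil {
		fmt.Println("kvs (possibly cached):", len(resp.Kvs))
	}
}
```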
diff --git a/server/proxy/grpcproxy/leader.go b/etcd/proxy/grpcproxy/leader.go
similarity index 97%
rename from server/proxy/grpcproxy/leader.go
rename to etcd/proxy/grpcproxy/leader.go
index 158e3ee8814..75c270f5dae 100644
--- a/server/proxy/grpcproxy/leader.go
+++ b/etcd/proxy/grpcproxy/leader.go
@@ -19,7 +19,7 @@ import (
"math"
"sync"
- clientv3 "go.etcd.io/etcd/client/v3"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
"golang.org/x/time/rate"
)
diff --git a/etcd/proxy/grpcproxy/lease.go b/etcd/proxy/grpcproxy/lease.go
new file mode 100644
index 00000000000..19f809e296e
--- /dev/null
+++ b/etcd/proxy/grpcproxy/lease.go
@@ -0,0 +1,384 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+type leaseProxy struct {
+ // leaseClient handles requests from LeaseGrant() that require a lease ID.
+ leaseClient pb.LeaseClient
+
+ lessor clientv3.Lease
+
+ ctx context.Context
+
+ leader *leader
+
+ // mu protects adding outstanding leaseProxyStream through wg.
+ mu sync.RWMutex
+
+ // wg waits until all outstanding leaseProxyStream quit.
+ wg sync.WaitGroup
+}
+
+func NewLeaseProxy(ctx context.Context, c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) {
+ cctx, cancel := context.WithCancel(ctx)
+ lp := &leaseProxy{
+ leaseClient: pb.NewLeaseClient(c.ActiveConnection()),
+ lessor: c.Lease,
+ ctx: cctx,
+ leader: newLeader(cctx, c.Watcher),
+ }
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ <-lp.leader.stopNotify()
+ lp.mu.Lock()
+ select {
+ case <-lp.ctx.Done():
+ case <-lp.leader.disconnectNotify():
+ cancel()
+ }
+ <-lp.ctx.Done()
+ lp.mu.Unlock()
+ lp.wg.Wait()
+ }()
+ return lp, ch
+}
+
+func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.WaitForReady(true))
+ if err != nil {
+ return nil, err
+ }
+ lp.leader.gotLeader()
+ return rp, nil
+}
+
+func (lp *leaseProxy) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ r, err := lp.lessor.Revoke(ctx, clientv3.LeaseID(rr.ID))
+ if err != nil {
+ return nil, err
+ }
+ lp.leader.gotLeader()
+ return (*pb.LeaseRevokeResponse)(r), nil
+}
+
+func (lp *leaseProxy) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ var (
+ r *clientv3.LeaseTimeToLiveResponse
+ err error
+ )
+ if rr.Keys {
+ r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID), clientv3.WithAttachedKeys())
+ } else {
+ r, err = lp.lessor.TimeToLive(ctx, clientv3.LeaseID(rr.ID))
+ }
+ if err != nil {
+ return nil, err
+ }
+ rp := &pb.LeaseTimeToLiveResponse{
+ Header: r.ResponseHeader,
+ ID: int64(r.ID),
+ TTL: r.TTL,
+ GrantedTTL: r.GrantedTTL,
+ Keys: r.Keys,
+ }
+ return rp, err
+}
+
+func (lp *leaseProxy) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ r, err := lp.lessor.Leases(ctx)
+ if err != nil {
+ return nil, err
+ }
+ leases := make([]*pb.LeaseStatus, len(r.Leases))
+ for i := range r.Leases {
+ leases[i] = &pb.LeaseStatus{ID: int64(r.Leases[i].ID)}
+ }
+ rp := &pb.LeaseLeasesResponse{
+ Header: r.ResponseHeader,
+ Leases: leases,
+ }
+ return rp, err
+}
+
+func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
+ lp.mu.Lock()
+ select {
+ case <-lp.ctx.Done():
+ lp.mu.Unlock()
+ return lp.ctx.Err()
+ default:
+ lp.wg.Add(1)
+ }
+ lp.mu.Unlock()
+
+ ctx, cancel := context.WithCancel(stream.Context())
+ lps := leaseProxyStream{
+ stream: stream,
+ lessor: lp.lessor,
+ keepAliveLeases: make(map[int64]*atomicCounter),
+ respc: make(chan *pb.LeaseKeepAliveResponse),
+ ctx: ctx,
+ cancel: cancel,
+ }
+
+ errc := make(chan error, 2)
+
+ var lostLeaderC <-chan struct{}
+ if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
+ v := md[rpctypes.MetadataRequireLeaderKey]
+ if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
+ lostLeaderC = lp.leader.lostNotify()
+ // if leader is known to be lost at creation time, avoid
+ // letting events through at all
+ select {
+ case <-lostLeaderC:
+ lp.wg.Done()
+ return rpctypes.ErrNoLeader
+ default:
+ }
+ }
+ }
+ stopc := make(chan struct{}, 3)
+ go func() {
+ defer func() { stopc <- struct{}{} }()
+ if err := lps.recvLoop(); err != nil {
+ errc <- err
+ }
+ }()
+
+ go func() {
+ defer func() { stopc <- struct{}{} }()
+ if err := lps.sendLoop(); err != nil {
+ errc <- err
+ }
+ }()
+
+ // tears down LeaseKeepAlive stream if leader goes down or entire leaseProxy is terminated.
+ go func() {
+ defer func() { stopc <- struct{}{} }()
+ select {
+ case <-lostLeaderC:
+ case <-ctx.Done():
+ case <-lp.ctx.Done():
+ }
+ }()
+
+ var err error
+ select {
+ case <-stopc:
+ stopc <- struct{}{}
+ case err = <-errc:
+ }
+ cancel()
+
+ // recv/send may only shutdown after function exits;
+ // this goroutine notifies lease proxy that the stream is through
+ go func() {
+ <-stopc
+ <-stopc
+ <-stopc
+ lps.close()
+ close(errc)
+ lp.wg.Done()
+ }()
+
+ select {
+ case <-lostLeaderC:
+ return rpctypes.ErrNoLeader
+ case <-lp.leader.disconnectNotify():
+ return status.Error(codes.Canceled, "the client connection is closing")
+ default:
+ if err != nil {
+ return err
+ }
+ return ctx.Err()
+ }
+}
+
+type leaseProxyStream struct {
+ stream pb.Lease_LeaseKeepAliveServer
+
+ lessor clientv3.Lease
+ // wg tracks keepAliveLoop goroutines
+ wg sync.WaitGroup
+ // mu protects keepAliveLeases
+ mu sync.RWMutex
+ // keepAliveLeases tracks, per lease, how many outstanding keepalive requests still need responses.
+ keepAliveLeases map[int64]*atomicCounter
+ // respc receives lease keepalive responses from etcd backend
+ respc chan *pb.LeaseKeepAliveResponse
+
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func (lps *leaseProxyStream) recvLoop() error {
+ for {
+ rr, err := lps.stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ lps.mu.Lock()
+ neededResps, ok := lps.keepAliveLeases[rr.ID]
+ if !ok {
+ neededResps = &atomicCounter{}
+ lps.keepAliveLeases[rr.ID] = neededResps
+ lps.wg.Add(1)
+ go func() {
+ defer lps.wg.Done()
+ if err := lps.keepAliveLoop(rr.ID, neededResps); err != nil {
+ lps.cancel()
+ }
+ }()
+ }
+ neededResps.add(1)
+ lps.mu.Unlock()
+ }
+}
+
+func (lps *leaseProxyStream) keepAliveLoop(leaseID int64, neededResps *atomicCounter) error {
+ cctx, ccancel := context.WithCancel(lps.ctx)
+ defer ccancel()
+ respc, err := lps.lessor.KeepAlive(cctx, clientv3.LeaseID(leaseID))
+ if err != nil {
+ return err
+ }
+ // ticker expires when loop hasn't received keepalive within TTL
+ var ticker <-chan time.Time
+ for {
+ select {
+ case <-ticker:
+ lps.mu.Lock()
+ // if there are outstanding keepAlive reqs at the moment the ticker fires,
+ // don't close keepAliveLoop(); let it continue to process the KeepAlive reqs.
+ if neededResps.get() > 0 {
+ lps.mu.Unlock()
+ ticker = nil
+ continue
+ }
+ delete(lps.keepAliveLeases, leaseID)
+ lps.mu.Unlock()
+ return nil
+ case rp, ok := <-respc:
+ if !ok {
+ lps.mu.Lock()
+ delete(lps.keepAliveLeases, leaseID)
+ lps.mu.Unlock()
+ if neededResps.get() == 0 {
+ return nil
+ }
+ ttlResp, err := lps.lessor.TimeToLive(cctx, clientv3.LeaseID(leaseID))
+ if err != nil {
+ return err
+ }
+ r := &pb.LeaseKeepAliveResponse{
+ Header: ttlResp.ResponseHeader,
+ ID: int64(ttlResp.ID),
+ TTL: ttlResp.TTL,
+ }
+ for neededResps.get() > 0 {
+ select {
+ case lps.respc <- r:
+ neededResps.add(-1)
+ case <-lps.ctx.Done():
+ return nil
+ }
+ }
+ return nil
+ }
+ if neededResps.get() == 0 {
+ continue
+ }
+ ticker = time.After(time.Duration(rp.TTL) * time.Second)
+ r := &pb.LeaseKeepAliveResponse{
+ Header: rp.ResponseHeader,
+ ID: int64(rp.ID),
+ TTL: rp.TTL,
+ }
+ lps.replyToClient(r, neededResps)
+ }
+ }
+}
+
+func (lps *leaseProxyStream) replyToClient(r *pb.LeaseKeepAliveResponse, neededResps *atomicCounter) {
+ timer := time.After(500 * time.Millisecond)
+ for neededResps.get() > 0 {
+ select {
+ case lps.respc <- r:
+ neededResps.add(-1)
+ case <-timer:
+ return
+ case <-lps.ctx.Done():
+ return
+ }
+ }
+}
+
+func (lps *leaseProxyStream) sendLoop() error {
+ for {
+ select {
+ case lrp, ok := <-lps.respc:
+ if !ok {
+ return nil
+ }
+ if err := lps.stream.Send(lrp); err != nil {
+ return err
+ }
+ case <-lps.ctx.Done():
+ return lps.ctx.Err()
+ }
+ }
+}
+
+func (lps *leaseProxyStream) close() {
+ lps.cancel()
+ lps.wg.Wait()
+ // only close respc channel if all the keepAliveLoop() goroutines have finished
+ // this ensures those goroutines don't send resp to a closed resp channel
+ close(lps.respc)
+}
+
+type atomicCounter struct {
+ counter int64
+}
+
+func (ac *atomicCounter) add(delta int64) {
+ atomic.AddInt64(&ac.counter, delta)
+}
+
+func (ac *atomicCounter) get() int64 {
+ return atomic.LoadInt64(&ac.counter)
+}
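The keepAliveLoop above re-arms a time.After timer on every keepalive response and only tears the per-lease loop down when the timer fires with no outstanding requests. A tiny self-contained illustration of that resettable-timeout pattern (the channel and TTL here are simulated, not the proxy's types):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	respc := make(chan int)
	go func() { // simulated keepalive responses, each carrying a 1-second TTL
		for i := 0; i < 3; i++ {
			respc <- 1
			time.Sleep(200 * time.Millisecond)
		}
	}()

	// Every response re-arms the timer; when the timer fires with nothing
	// outstanding, the loop exits. This mirrors the shape of keepAliveLoop above.
	var timeout <-chan time.Time
	for {
		select {
		case ttl := <-respc:
			fmt.Println("keepalive response, re-arming timer")
			timeout = time.After(time.Duration(ttl) * time.Second)
		case <-timeout:
			fmt.Println("no keepalive within TTL; stopping loop")
			return
		}
	}
}
```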
diff --git a/etcd/proxy/grpcproxy/lock.go b/etcd/proxy/grpcproxy/lock.go
new file mode 100644
index 00000000000..35c83d640e9
--- /dev/null
+++ b/etcd/proxy/grpcproxy/lock.go
@@ -0,0 +1,38 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3lock/v3lockpb"
+)
+
+type lockProxy struct {
+ client *clientv3.Client
+}
+
+func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer {
+ return &lockProxy{client: client}
+}
+
+func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
+ return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req)
+}
+
+func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
+ return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req)
+}
diff --git a/etcd/proxy/grpcproxy/maintenance.go b/etcd/proxy/grpcproxy/maintenance.go
new file mode 100644
index 00000000000..bfe69cc3db9
--- /dev/null
+++ b/etcd/proxy/grpcproxy/maintenance.go
@@ -0,0 +1,96 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+ "io"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type maintenanceProxy struct {
+ client *clientv3.Client
+}
+
+func NewMaintenanceProxy(c *clientv3.Client) pb.MaintenanceServer {
+ return &maintenanceProxy{
+ client: c,
+ }
+}
+
+func (mp *maintenanceProxy) Defragment(ctx context.Context, dr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+ conn := mp.client.ActiveConnection()
+ return pb.NewMaintenanceClient(conn).Defragment(ctx, dr)
+}
+
+func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenance_SnapshotServer) error {
+ conn := mp.client.ActiveConnection()
+ ctx, cancel := context.WithCancel(stream.Context())
+ defer cancel()
+
+ ctx = withClientAuthToken(ctx, stream.Context())
+
+ sc, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, sr)
+ if err != nil {
+ return err
+ }
+
+ for {
+ rr, err := sc.Recv()
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ err = stream.Send(rr)
+ if err != nil {
+ return err
+ }
+ }
+}
+
+func (mp *maintenanceProxy) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+ conn := mp.client.ActiveConnection()
+ return pb.NewMaintenanceClient(conn).Hash(ctx, r)
+}
+
+func (mp *maintenanceProxy) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+ conn := mp.client.ActiveConnection()
+ return pb.NewMaintenanceClient(conn).HashKV(ctx, r)
+}
+
+func (mp *maintenanceProxy) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ conn := mp.client.ActiveConnection()
+ return pb.NewMaintenanceClient(conn).Alarm(ctx, r)
+}
+
+func (mp *maintenanceProxy) Status(ctx context.Context, r *pb.StatusRequest) (*pb.StatusResponse, error) {
+ conn := mp.client.ActiveConnection()
+ return pb.NewMaintenanceClient(conn).Status(ctx, r)
+}
+
+func (mp *maintenanceProxy) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ conn := mp.client.ActiveConnection()
+ return pb.NewMaintenanceClient(conn).MoveLeader(ctx, r)
+}
+
+func (mp *maintenanceProxy) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ conn := mp.client.ActiveConnection()
+ return pb.NewMaintenanceClient(conn).Downgrade(ctx, r)
+}
diff --git a/server/proxy/grpcproxy/register.go b/etcd/proxy/grpcproxy/register.go
similarity index 88%
rename from server/proxy/grpcproxy/register.go
rename to etcd/proxy/grpcproxy/register.go
index 4fafb481022..505a73a8ea9 100644
--- a/server/proxy/grpcproxy/register.go
+++ b/etcd/proxy/grpcproxy/register.go
@@ -18,9 +18,10 @@ import (
"encoding/json"
"os"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/client/v3/naming/endpoints"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/naming/endpoints"
"go.uber.org/zap"
"golang.org/x/time/rate"
@@ -29,7 +30,7 @@ import (
// allow maximum 1 retry per second
const registerRetryRate = 1
-// Register registers itself as a grpc-proxy server by writing prefixed-key
+// Register registers itself as a grpc-proxy etcd by writing prefixed-key
// with session of specified TTL (in seconds). The returned channel is closed
// when the client's context is canceled.
func Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, ttl int) <-chan struct{} {
@@ -51,7 +52,7 @@ func Register(lg *zap.Logger, c *clientv3.Client, prefix string, addr string, tt
return
case <-ss.Done():
- lg.Warn("session expired; possible network partition or server restart")
+ lg.Warn("session expired; possible network partition or etcd restart")
lg.Warn("creating a new session to rejoin")
continue
}
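Register above writes this proxy's address under a prefix guarded by a session lease, and the cluster proxy earlier in this diff watches the same prefix so MemberList can answer with registered proxies. A rough end-to-end sketch, assuming the generated pb.RegisterClusterServer helper (not shown in this diff); prefix, address, and TTL are illustrative:

```go
package main

import (
	"log"
	"net"

	clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
	"github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy"
	pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

func main() {
	lg := zap.NewExample()
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	const prefix, addr = "___grpc_proxy_endpoint", "127.0.0.1:23790"

	// Advertise this proxy under the shared prefix with a 60-second session TTL.
	donec := grpcproxy.Register(lg, cli, prefix, addr, 60)

	// The cluster proxy watches the same prefix, so MemberList can answer with
	// the registered proxies instead of the backend member list.
	clus, _ := grpcproxy.NewClusterProxy(lg, cli, addr, prefix)
	srv := grpc.NewServer()
	pb.RegisterClusterServer(srv, clus)

	l, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatal(err)
	}
	go srv.Serve(l)

	<-donec // closed when the client's context is canceled
}
```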
diff --git a/etcd/proxy/grpcproxy/util.go b/etcd/proxy/grpcproxy/util.go
new file mode 100644
index 00000000000..f3cef2a5d38
--- /dev/null
+++ b/etcd/proxy/grpcproxy/util.go
@@ -0,0 +1,75 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+)
+
+func getAuthTokenFromClient(ctx context.Context) string {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if ok {
+ ts, ok := md[rpctypes.TokenFieldNameGRPC]
+ if ok {
+ return ts[0]
+ }
+ }
+ return ""
+}
+
+func withClientAuthToken(ctx, ctxWithToken context.Context) context.Context {
+ token := getAuthTokenFromClient(ctxWithToken)
+ if token != "" {
+ ctx = context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token)
+ }
+ return ctx
+}
+
+type proxyTokenCredential struct {
+ token string
+}
+
+func (cred *proxyTokenCredential) RequireTransportSecurity() bool {
+ return false
+}
+
+func (cred *proxyTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+ return map[string]string{
+ rpctypes.TokenFieldNameGRPC: cred.token,
+ }, nil
+}
+
+func AuthUnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ token := getAuthTokenFromClient(ctx)
+ if token != "" {
+ tokenCred := &proxyTokenCredential{token}
+ opts = append(opts, grpc.PerRPCCredentials(tokenCred))
+ }
+ return invoker(ctx, method, req, reply, cc, opts...)
+}
+
+func AuthStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ tokenif := ctx.Value(rpctypes.TokenFieldNameGRPC)
+ if tokenif != nil {
+ tokenCred := &proxyTokenCredential{tokenif.(string)}
+ opts = append(opts, grpc.PerRPCCredentials(tokenCred))
+ }
+ return streamer(ctx, desc, cc, method, opts...)
+}
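AuthUnaryClientInterceptor and AuthStreamClientInterceptor above match grpc's client-interceptor signatures, so they can be installed directly on the proxy's backend connection to forward the caller's auth token as per-RPC credentials. A minimal sketch (endpoint illustrative; grpc.WithInsecure used only to keep the example short):

```go
package main

import (
	"log"

	"github.com/ls-2018/etcd_cn/etcd/proxy/grpcproxy"
	"google.golang.org/grpc"
)

func main() {
	// If the incoming context carried an auth token, the interceptors attach it
	// to the outgoing request as per-RPC credentials; otherwise they are no-ops.
	conn, err := grpc.Dial(
		"127.0.0.1:2379", // backend endpoint, illustrative
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(grpcproxy.AuthUnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpcproxy.AuthStreamClientInterceptor),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```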
diff --git a/etcd/proxy/grpcproxy/watch.go b/etcd/proxy/grpcproxy/watch.go
new file mode 100644
index 00000000000..45a1ec74fab
--- /dev/null
+++ b/etcd/proxy/grpcproxy/watch.go
@@ -0,0 +1,315 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "context"
+ "sync"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v3rpc"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+type watchProxy struct {
+ cw clientv3.Watcher
+ ctx context.Context
+
+ leader *leader
+
+ ranges *watchRanges
+
+ // mu protects adding outstanding watch servers through wg.
+ mu sync.Mutex
+
+ // wg waits until all outstanding watch servers quit.
+ wg sync.WaitGroup
+
+ // kv is used for permission checking
+ kv clientv3.KV
+ lg *zap.Logger
+}
+
+func NewWatchProxy(ctx context.Context, lg *zap.Logger, c *clientv3.Client) (pb.WatchServer, <-chan struct{}) {
+ cctx, cancel := context.WithCancel(ctx)
+ wp := &watchProxy{
+ cw: c.Watcher,
+ ctx: cctx,
+ leader: newLeader(cctx, c.Watcher),
+
+ kv: c.KV, // for permission checking
+ lg: lg,
+ }
+ wp.ranges = newWatchRanges(wp)
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ <-wp.leader.stopNotify()
+ wp.mu.Lock()
+ select {
+ case <-wp.ctx.Done():
+ case <-wp.leader.disconnectNotify():
+ cancel()
+ }
+ <-wp.ctx.Done()
+ wp.mu.Unlock()
+ wp.wg.Wait()
+ wp.ranges.stop()
+ }()
+ return wp, ch
+}
+
+func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) {
+ wp.mu.Lock()
+ select {
+ case <-wp.ctx.Done():
+ wp.mu.Unlock()
+ select {
+ case <-wp.leader.disconnectNotify():
+ return status.Error(codes.Canceled, "the client connection is closing")
+ default:
+ return wp.ctx.Err()
+ }
+ default:
+ wp.wg.Add(1)
+ }
+ wp.mu.Unlock()
+
+ ctx, cancel := context.WithCancel(stream.Context())
+ wps := &watchProxyStream{
+ ranges: wp.ranges,
+ watchers: make(map[int64]*watcher),
+ stream: stream,
+ watchCh: make(chan *pb.WatchResponse, 1024),
+ ctx: ctx,
+ cancel: cancel,
+ kv: wp.kv,
+ lg: wp.lg,
+ }
+
+ var lostLeaderC <-chan struct{}
+ if md, ok := metadata.FromOutgoingContext(stream.Context()); ok {
+ v := md[rpctypes.MetadataRequireLeaderKey]
+ if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader {
+ lostLeaderC = wp.leader.lostNotify()
+ // if leader is known to be lost at creation time, avoid
+ // letting events through at all
+ select {
+ case <-lostLeaderC:
+ wp.wg.Done()
+ return rpctypes.ErrNoLeader
+ default:
+ }
+ }
+ }
+
+	// post to stopc => terminate the server stream; can't use a waitgroup
+ // since all goroutines will only terminate after Watch() exits.
+ stopc := make(chan struct{}, 3)
+ go func() {
+ defer func() { stopc <- struct{}{} }()
+ wps.recvLoop()
+ }()
+ go func() {
+ defer func() { stopc <- struct{}{} }()
+ wps.sendLoop()
+ }()
+ // tear down watch if leader goes down or entire watch proxy is terminated
+ go func() {
+ defer func() { stopc <- struct{}{} }()
+ select {
+ case <-lostLeaderC:
+ case <-ctx.Done():
+ case <-wp.ctx.Done():
+ }
+ }()
+
+ <-stopc
+ cancel()
+
+ // recv/send may only shutdown after function exits;
+ // goroutine notifies proxy that stream is through
+ go func() {
+ <-stopc
+ <-stopc
+ wps.close()
+ wp.wg.Done()
+ }()
+
+ select {
+ case <-lostLeaderC:
+ return rpctypes.ErrNoLeader
+ case <-wp.leader.disconnectNotify():
+ return status.Error(codes.Canceled, "the client connection is closing")
+ default:
+ return wps.ctx.Err()
+ }
+}
+
+// watchProxyStream forwards etcd watch events to a proxied client stream.
+type watchProxyStream struct {
+ ranges *watchRanges
+
+ // mu protects watchers and nextWatcherID
+ mu sync.Mutex
+ // watchers receive events from watch broadcast.
+ watchers map[int64]*watcher
+ // nextWatcherID is the id to assign the next watcher on this stream.
+ nextWatcherID int64
+
+ stream pb.Watch_WatchServer
+
+ // watchCh receives watch responses from the watchers.
+ watchCh chan *pb.WatchResponse
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ // kv is used for permission checking
+ kv clientv3.KV
+ lg *zap.Logger
+}
+
+func (wps *watchProxyStream) close() {
+ var wg sync.WaitGroup
+ wps.cancel()
+ wps.mu.Lock()
+ wg.Add(len(wps.watchers))
+ for _, wpsw := range wps.watchers {
+ go func(w *watcher) {
+ wps.ranges.delete(w)
+ wg.Done()
+ }(wpsw)
+ }
+ wps.watchers = nil
+ wps.mu.Unlock()
+
+ wg.Wait()
+
+ close(wps.watchCh)
+}
+
+func (wps *watchProxyStream) checkPermissionForWatch(key, rangeEnd []byte) error {
+ if len(key) == 0 {
+		// If the key is empty, check permission against the full keyspace;
+		// see clientv3.WithPrefix().
+ key = []byte{0}
+ rangeEnd = []byte{0}
+ }
+ req := &pb.RangeRequest{
+ Serializable: true,
+ Key: string(key),
+ RangeEnd: string(rangeEnd),
+ CountOnly: true,
+ Limit: 1,
+ }
+ _, err := wps.kv.Do(wps.ctx, RangeRequestToOp(req))
+ return err
+}
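+
+// checkPermissionForWatch relies on the etcd server itself for authorization:
+// the serializable, count-only Range with limit 1 above is sent through the
+// proxy's client, so the server's auth layer rejects it (and therefore the
+// watch) when the caller's token lacks read permission on [key, rangeEnd).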
+
+func (wps *watchProxyStream) recvLoop() error {
+ for {
+ req, err := wps.stream.Recv()
+ if err != nil {
+ return err
+ }
+ if req.WatchRequest_CreateRequest != nil {
+ uv := req.WatchRequest_CreateRequest
+ cr := uv.CreateRequest
+
+ if err := wps.checkPermissionForWatch([]byte(cr.Key), []byte(cr.RangeEnd)); err != nil {
+ wps.watchCh <- &pb.WatchResponse{
+ Header: &pb.ResponseHeader{},
+ WatchId: -1,
+ Created: true,
+ Canceled: true,
+ CancelReason: err.Error(),
+ }
+ continue
+ }
+
+ wps.mu.Lock()
+ w := &watcher{
+ wr: watchRange{string(cr.Key), string(cr.RangeEnd)},
+ id: wps.nextWatcherID,
+ wps: wps,
+
+ nextrev: cr.StartRevision,
+ progress: cr.ProgressNotify,
+ prevKV: cr.PrevKv,
+ filters: v3rpc.FiltersFromRequest(cr),
+ }
+ if !w.wr.valid() {
+ w.post(&pb.WatchResponse{WatchId: -1, Created: true, Canceled: true})
+ wps.mu.Unlock()
+ continue
+ }
+ wps.nextWatcherID++
+ w.nextrev = cr.StartRevision
+ wps.watchers[w.id] = w
+ wps.ranges.add(w)
+ wps.mu.Unlock()
+			wps.lg.Debug("create watcher", zap.String("key", w.wr.key), zap.String("end", w.wr.end), zap.Int64("watcherId", w.id))
+ } else if req.WatchRequest_CancelRequest != nil {
+ uv := req.WatchRequest_CancelRequest
+ wps.delete(uv.CancelRequest.WatchId)
+ wps.lg.Debug("cancel watcher", zap.Int64("watcherId", uv.CancelRequest.WatchId))
+ } else {
+			// Panic or Fatalf would allow network clients to crash the server remotely.
+			wps.lg.Error("request type not supported by gRPC proxy", zap.Stringer("request", req))
+ }
+ }
+}
+
+func (wps *watchProxyStream) sendLoop() {
+ for {
+ select {
+ case wresp, ok := <-wps.watchCh:
+ if !ok {
+ return
+ }
+ if err := wps.stream.Send(wresp); err != nil {
+ return
+ }
+ case <-wps.ctx.Done():
+ return
+ }
+ }
+}
+
+func (wps *watchProxyStream) delete(id int64) {
+ wps.mu.Lock()
+ defer wps.mu.Unlock()
+
+ w, ok := wps.watchers[id]
+ if !ok {
+ return
+ }
+ wps.ranges.delete(w)
+ delete(wps.watchers, id)
+ resp := &pb.WatchResponse{
+ Header: &w.lastHeader,
+ WatchId: id,
+ Canceled: true,
+ }
+ wps.watchCh <- resp
+}
diff --git a/server/proxy/grpcproxy/watch_broadcast.go b/etcd/proxy/grpcproxy/watch_broadcast.go
similarity index 91%
rename from server/proxy/grpcproxy/watch_broadcast.go
rename to etcd/proxy/grpcproxy/watch_broadcast.go
index 1d9a43df143..acd277c01bd 100644
--- a/server/proxy/grpcproxy/watch_broadcast.go
+++ b/etcd/proxy/grpcproxy/watch_broadcast.go
@@ -19,15 +19,15 @@ import (
"sync"
"time"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- clientv3 "go.etcd.io/etcd/client/v3"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
"go.uber.org/zap"
)
-// watchBroadcast broadcasts a server watcher to many client watchers.
+// watchBroadcast broadcasts an etcd watcher to many client watchers.
type watchBroadcast struct {
- // cancel stops the underlying etcd server watcher and closes ch.
+	// cancel stops the underlying etcd watcher and closes ch.
cancel context.CancelFunc
donec chan struct{}
@@ -88,7 +88,6 @@ func (wb *watchBroadcast) bcast(wr clientv3.WatchResponse) {
r.send(wr)
}
if len(wb.receivers) > 0 {
- eventsCoalescing.Add(float64(len(wb.receivers) - 1))
}
}
@@ -122,10 +121,10 @@ func (wb *watchBroadcast) add(w *watcher) bool {
return false
}
wb.receivers[w] = struct{}{}
- watchersCoalescing.Inc()
return true
}
+
func (wb *watchBroadcast) delete(w *watcher) {
wb.mu.Lock()
defer wb.mu.Unlock()
@@ -135,7 +134,6 @@ func (wb *watchBroadcast) delete(w *watcher) {
delete(wb.receivers, w)
if len(wb.receivers) > 0 {
// do not dec the only left watcher for coalescing.
- watchersCoalescing.Dec()
}
}
@@ -150,7 +148,6 @@ func (wb *watchBroadcast) empty() bool { return wb.size() == 0 }
func (wb *watchBroadcast) stop() {
if !wb.empty() {
// do not dec the only left watcher for coalescing.
- watchersCoalescing.Sub(float64(wb.size() - 1))
}
wb.cancel()
diff --git a/server/proxy/grpcproxy/watch_broadcasts.go b/etcd/proxy/grpcproxy/watch_broadcasts.go
similarity index 97%
rename from server/proxy/grpcproxy/watch_broadcasts.go
rename to etcd/proxy/grpcproxy/watch_broadcasts.go
index dacd3007d1d..2dddea26a10 100644
--- a/server/proxy/grpcproxy/watch_broadcasts.go
+++ b/etcd/proxy/grpcproxy/watch_broadcasts.go
@@ -63,7 +63,7 @@ func (wbs *watchBroadcasts) coalesce(wb *watchBroadcast) {
wbswb.mu.Lock()
// 1. check if wbswb is behind wb so it won't skip any events in wb
// 2. ensure wbswb started; nextrev == 0 may mean wbswb is waiting
- // for a current watcher and expects a create event from the server.
+	// for a current watcher and expects a create event from the etcd server.
if wb.nextrev >= wbswb.nextrev && wbswb.responses > 0 {
for w := range wb.receivers {
wbswb.receivers[w] = struct{}{}
diff --git a/server/proxy/grpcproxy/watch_ranges.go b/etcd/proxy/grpcproxy/watch_ranges.go
similarity index 100%
rename from server/proxy/grpcproxy/watch_ranges.go
rename to etcd/proxy/grpcproxy/watch_ranges.go
diff --git a/etcd/proxy/grpcproxy/watcher.go b/etcd/proxy/grpcproxy/watcher.go
new file mode 100644
index 00000000000..d8aa082119d
--- /dev/null
+++ b/etcd/proxy/grpcproxy/watcher.go
@@ -0,0 +1,130 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package grpcproxy
+
+import (
+ "time"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+)
+
+type watchRange struct {
+ key, end string
+}
+
+func (wr *watchRange) valid() bool {
+ return len(wr.end) == 0 || wr.end > wr.key || (wr.end[0] == 0 && len(wr.end) == 1)
+}
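+
+// A few illustrative cases for the rule above:
+//
+//	watchRange{key: "foo", end: ""}     // single key            -> valid
+//	watchRange{key: "foo", end: "fop"}  // prefix-style range    -> valid
+//	watchRange{key: "foo", end: "\x00"} // from "foo" to the end -> valid
+//	watchRange{key: "foo", end: "abc"}  // end < key             -> invalid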
+
+type watcher struct {
+ // user configuration
+
+ wr watchRange
+ filters []mvcc.FilterFunc
+ progress bool
+ prevKV bool
+
+ // id is the id returned to the client on its watch stream.
+ id int64
+ // nextrev is the minimum expected next event revision.
+ nextrev int64
+ // lastHeader has the last header sent over the stream.
+ lastHeader pb.ResponseHeader
+
+ // wps is the parent.
+ wps *watchProxyStream
+}
+
+// send filters out repeated events by discarding revisions older
+// than the last one sent over the watch channel.
+func (w *watcher) send(wr clientv3.WatchResponse) {
+ if wr.IsProgressNotify() && !w.progress {
+ return
+ }
+ if w.nextrev > wr.Header.Revision && len(wr.Events) > 0 {
+ return
+ }
+ if w.nextrev == 0 {
+ // current watch; expect updates following this revision
+ w.nextrev = wr.Header.Revision + 1
+ }
+
+ events := make([]*mvccpb.Event, 0, len(wr.Events))
+
+ var lastRev int64
+ for i := range wr.Events {
+ ev := (*mvccpb.Event)(wr.Events[i])
+ if ev.Kv.ModRevision < w.nextrev {
+ continue
+ } else {
+			// We cannot update w.nextrev here.
+ // txn can have multiple events with the same rev.
+ // If w.nextrev updates here, it would skip events in the same txn.
+ lastRev = ev.Kv.ModRevision
+ }
+
+ filtered := false
+ for _, filter := range w.filters {
+ if filter(*ev) {
+ filtered = true
+ break
+ }
+ }
+ if filtered {
+ continue
+ }
+
+ if !w.prevKV {
+ evCopy := *ev
+ evCopy.PrevKv = nil
+ ev = &evCopy
+ }
+ events = append(events, ev)
+ }
+
+ if lastRev >= w.nextrev {
+ w.nextrev = lastRev + 1
+ }
+
+ // all events are filtered out?
+ if !wr.IsProgressNotify() && !wr.Created && len(events) == 0 && wr.CompactRevision == 0 {
+ return
+ }
+
+ w.lastHeader = wr.Header
+ w.post(&pb.WatchResponse{
+ Header: &wr.Header,
+ Created: wr.Created,
+ CompactRevision: wr.CompactRevision,
+ Canceled: wr.Canceled,
+ WatchId: w.id,
+ Events: events,
+ })
+}
+
+// post puts a watch response on the watcher's proxy stream channel
+func (w *watcher) post(wr *pb.WatchResponse) bool {
+ select {
+ case w.wps.watchCh <- wr:
+ case <-time.After(50 * time.Millisecond):
+ w.wps.cancel()
+		w.wps.lg.Error("failed to put a watch response on the watcher's proxy stream channel: timed out")
+ return false
+ }
+ return true
+}
diff --git a/etcd/proxy/httpproxy/director.go b/etcd/proxy/httpproxy/director.go
new file mode 100644
index 00000000000..e20e2226a0d
--- /dev/null
+++ b/etcd/proxy/httpproxy/director.go
@@ -0,0 +1,179 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpproxy
+
+import (
+ "math/rand"
+ "net/url"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+// defaultRefreshInterval is the default proxyRefreshIntervalMs value
+// as in etcdmain/config.go.
+const defaultRefreshInterval = 30000 * time.Millisecond
+
+var once sync.Once
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+func newDirector(lg *zap.Logger, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) *director {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ d := &director{
+ lg: lg,
+ uf: urlsFunc,
+ failureWait: failureWait,
+ }
+ d.refresh()
+ go func() {
+		// To avoid missing proxy endpoints on the first try: when the
+		// configured refresh interval is defaultRefreshInterval or greater
+		// and no proxy endpoints are currently available, refresh every
+		// second instead.
+ for {
+ es := d.endpoints()
+ ri := refreshInterval
+ if ri >= defaultRefreshInterval {
+ if len(es) == 0 {
+ ri = time.Second
+ }
+ }
+ if len(es) > 0 {
+ once.Do(func() {
+ var sl []string
+ for _, e := range es {
+ sl = append(sl, e.URL.String())
+ }
+ lg.Info("endpoints found", zap.Strings("endpoints", sl))
+ })
+ }
+ time.Sleep(ri)
+ d.refresh()
+ }
+ }()
+ return d
+}
+
+type director struct {
+ sync.Mutex
+ lg *zap.Logger
+ ep []*endpoint
+ uf GetProxyURLs
+ failureWait time.Duration
+}
+
+func (d *director) refresh() {
+ urls := d.uf()
+ d.Lock()
+ defer d.Unlock()
+ var endpoints []*endpoint
+ for _, u := range urls {
+ uu, err := url.Parse(u)
+ if err != nil {
+ d.lg.Info("upstream URL invalid", zap.Error(err))
+ continue
+ }
+ endpoints = append(endpoints, newEndpoint(d.lg, *uu, d.failureWait))
+ }
+
+ // shuffle array to avoid connections being "stuck" to a single endpoint
+ for i := range endpoints {
+ j := rand.Intn(i + 1)
+ endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
+ }
+
+ d.ep = endpoints
+}
+
+func (d *director) endpoints() []*endpoint {
+ d.Lock()
+ defer d.Unlock()
+ filtered := make([]*endpoint, 0)
+ for _, ep := range d.ep {
+ if ep.Available {
+ filtered = append(filtered, ep)
+ }
+ }
+
+ return filtered
+}
+
+func newEndpoint(lg *zap.Logger, u url.URL, failureWait time.Duration) *endpoint {
+ ep := endpoint{
+ lg: lg,
+ URL: u,
+ Available: true,
+ failFunc: timedUnavailabilityFunc(failureWait),
+ }
+
+ return &ep
+}
+
+type endpoint struct {
+ sync.Mutex
+
+ lg *zap.Logger
+ URL url.URL
+ Available bool
+
+ failFunc func(ep *endpoint)
+}
+
+func (ep *endpoint) Failed() {
+ ep.Lock()
+ if !ep.Available {
+ ep.Unlock()
+ return
+ }
+
+ ep.Available = false
+ ep.Unlock()
+
+ if ep.lg != nil {
+ ep.lg.Info("marked endpoint unavailable", zap.String("endpoint", ep.URL.String()))
+ }
+
+ if ep.failFunc == nil {
+ if ep.lg != nil {
+ ep.lg.Info(
+ "no failFunc defined, endpoint will be unavailable forever",
+ zap.String("endpoint", ep.URL.String()),
+ )
+ }
+ return
+ }
+
+ ep.failFunc(ep)
+}
+
+func timedUnavailabilityFunc(wait time.Duration) func(*endpoint) {
+ return func(ep *endpoint) {
+ time.AfterFunc(wait, func() {
+ ep.Available = true
+ if ep.lg != nil {
+ ep.lg.Info(
+ "marked endpoint available, to retest connectivity",
+ zap.String("endpoint", ep.URL.String()),
+ )
+ }
+ })
+ }
+}
diff --git a/etcd/proxy/httpproxy/doc.go b/etcd/proxy/httpproxy/doc.go
new file mode 100644
index 00000000000..7a45099120c
--- /dev/null
+++ b/etcd/proxy/httpproxy/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httpproxy implements etcd httpproxy. The etcd proxy acts as a reverse
+// http proxy forwarding client requests to active etcd cluster members, and does
+// not participate in consensus.
+package httpproxy
diff --git a/etcd/proxy/httpproxy/proxy.go b/etcd/proxy/httpproxy/proxy.go
new file mode 100644
index 00000000000..c8f27bf01df
--- /dev/null
+++ b/etcd/proxy/httpproxy/proxy.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpproxy
+
+import (
+ "encoding/json"
+ "net/http"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+ "golang.org/x/net/http2"
+)
+
+const (
+	// DefaultMaxIdleConnsPerHost is the default maximum number of idle
+	// connections kept between the proxy and each member. It is set to 128 so
+	// the proxy can handle 128 concurrent requests smoothly over the long term.
+	// If the number of concurrent requests exceeds this value, the proxy has to
+	// open a new connection for every request beyond it, which wastes resources
+	// and may exhaust ephemeral ports.
+ DefaultMaxIdleConnsPerHost = 128
+)
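+
+// A minimal sketch of wiring this constant into the transport handed to
+// NewHandler below (lg and urlsFunc are placeholders for the caller's logger
+// and URL source; the wait/refresh durations are illustrative):
+//
+//	t := &http.Transport{MaxIdleConnsPerHost: DefaultMaxIdleConnsPerHost}
+//	handler := NewHandler(lg, t, urlsFunc, 5*time.Second, 30*time.Second)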
+
+// GetProxyURLs is a function which should return the current set of URLs to
+// which client requests should be proxied. This function will be queried
+// periodically by the proxy Handler to refresh the set of available
+// backends.
+type GetProxyURLs func() []string
+
+// NewHandler creates a new HTTP handler, listening on the given transport,
+// which will proxy requests to an etcd cluster.
+// The handler will periodically update its view of the cluster.
+func NewHandler(lg *zap.Logger, t *http.Transport, urlsFunc GetProxyURLs, failureWait time.Duration, refreshInterval time.Duration) http.Handler {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if t.TLSClientConfig != nil {
+ // Enable http2, see Issue 5033.
+ err := http2.ConfigureTransport(t)
+ if err != nil {
+ lg.Info("Error enabling Transport HTTP/2 support", zap.Error(err))
+ }
+ }
+
+ p := &reverseProxy{
+ lg: lg,
+ director: newDirector(lg, urlsFunc, failureWait, refreshInterval),
+ transport: t,
+ }
+
+ mux := http.NewServeMux()
+ mux.Handle("/", p)
+ mux.HandleFunc("/v2/config/local/proxy", p.configHandler)
+
+ return mux
+}
+
+// NewReadonlyHandler wraps the given HTTP handler to allow only GET requests
+func NewReadonlyHandler(hdlr http.Handler) http.Handler {
+ readonly := readonlyHandlerFunc(hdlr)
+ return http.HandlerFunc(readonly)
+}
+
+func readonlyHandlerFunc(next http.Handler) func(http.ResponseWriter, *http.Request) {
+ return func(w http.ResponseWriter, req *http.Request) {
+ if req.Method != "GET" {
+ w.WriteHeader(http.StatusNotImplemented)
+ return
+ }
+
+ next.ServeHTTP(w, req)
+ }
+}
+
+func (p *reverseProxy) configHandler(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+
+ eps := p.director.endpoints()
+ epstr := make([]string, len(eps))
+ for i, e := range eps {
+ epstr[i] = e.URL.String()
+ }
+
+ proxyConfig := struct {
+ Endpoints []string `json:"endpoints"`
+ }{
+ Endpoints: epstr,
+ }
+
+ json.NewEncoder(w).Encode(proxyConfig)
+}
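+
+// Example response body (illustrative endpoint values):
+//
+//	{"endpoints":["http://10.0.1.10:2379","http://10.0.1.11:2379"]}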
+
+// allowMethod verifies that the given method is one of the allowed methods,
+// and if not, it writes an error to w. A boolean is returned indicating
+// whether or not the method is allowed.
+func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
+ for _, meth := range ms {
+ if m == meth {
+ return true
+ }
+ }
+ w.Header().Set("Allow", strings.Join(ms, ","))
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return false
+}
diff --git a/etcd/proxy/httpproxy/reverse.go b/etcd/proxy/httpproxy/reverse.go
new file mode 100644
index 00000000000..c005fa36358
--- /dev/null
+++ b/etcd/proxy/httpproxy/reverse.go
@@ -0,0 +1,218 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpproxy
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync/atomic"
+
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2http/httptypes"
+
+ "go.uber.org/zap"
+)
+
+// Hop-by-hop headers. These are removed when sent to the backend.
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
+// This list of headers borrowed from stdlib httputil.ReverseProxy
+var singleHopHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Authenticate",
+ "Proxy-Authorization",
+ "Te", // canonicalized version of "TE"
+ "Trailers",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+func removeSingleHopHeaders(hdrs *http.Header) {
+ for _, h := range singleHopHeaders {
+ hdrs.Del(h)
+ }
+}
+
+type reverseProxy struct {
+ lg *zap.Logger
+ director *director
+ transport http.RoundTripper
+}
+
+func (p *reverseProxy) ServeHTTP(rw http.ResponseWriter, clientreq *http.Request) {
+ proxyreq := new(http.Request)
+ *proxyreq = *clientreq
+
+ var (
+ proxybody []byte
+ err error
+ )
+
+ if clientreq.Body != nil {
+ proxybody, err = ioutil.ReadAll(clientreq.Body)
+ if err != nil {
+ msg := fmt.Sprintf("failed to read request body: %v", err)
+ p.lg.Info("failed to read request body", zap.Error(err))
+ e := httptypes.NewHTTPError(http.StatusInternalServerError, "httpproxy: "+msg)
+ if we := e.WriteTo(rw); we != nil {
+ p.lg.Debug(
+ "error writing HTTPError to remote addr",
+ zap.String("remote-addr", clientreq.RemoteAddr),
+ zap.Error(we),
+ )
+ }
+ return
+ }
+ }
+
+ // deep-copy the headers, as these will be modified below
+ proxyreq.Header = make(http.Header)
+ copyHeader(proxyreq.Header, clientreq.Header)
+
+ normalizeRequest(proxyreq)
+ removeSingleHopHeaders(&proxyreq.Header)
+ maybeSetForwardedFor(proxyreq)
+
+ endpoints := p.director.endpoints()
+ if len(endpoints) == 0 {
+ msg := "zero endpoints currently available"
+
+ // TODO: limit the rate of the error logging.
+ p.lg.Info(msg)
+ e := httptypes.NewHTTPError(http.StatusServiceUnavailable, "httpproxy: "+msg)
+ if we := e.WriteTo(rw); we != nil {
+ p.lg.Debug(
+ "error writing HTTPError to remote addr",
+ zap.String("remote-addr", clientreq.RemoteAddr),
+ zap.Error(we),
+ )
+ }
+ return
+ }
+
+ var requestClosed int32
+ completeCh := make(chan bool, 1)
+ closeNotifier, ok := rw.(http.CloseNotifier)
+ ctx, cancel := context.WithCancel(context.Background())
+ proxyreq = proxyreq.WithContext(ctx)
+ defer cancel()
+ if ok {
+ closeCh := closeNotifier.CloseNotify()
+ go func() {
+ select {
+ case <-closeCh:
+ atomic.StoreInt32(&requestClosed, 1)
+ p.lg.Info(
+ "client closed request prematurely",
+ zap.String("remote-addr", clientreq.RemoteAddr),
+ )
+ cancel()
+ case <-completeCh:
+ }
+ }()
+
+ defer func() {
+ completeCh <- true
+ }()
+ }
+
+ var res *http.Response
+
+ for _, ep := range endpoints {
+ if proxybody != nil {
+ proxyreq.Body = ioutil.NopCloser(bytes.NewBuffer(proxybody))
+ }
+ redirectRequest(proxyreq, ep.URL)
+
+ res, err = p.transport.RoundTrip(proxyreq)
+ if atomic.LoadInt32(&requestClosed) == 1 {
+ return
+ }
+ if err != nil {
+ p.lg.Info(
+ "failed to direct request",
+ zap.String("url", ep.URL.String()),
+ zap.Error(err),
+ )
+ ep.Failed()
+ continue
+ }
+
+ break
+ }
+
+ if res == nil {
+ // TODO: limit the rate of the error logging.
+ msg := fmt.Sprintf("unable to get response from %d endpoint(s)", len(endpoints))
+ p.lg.Info(msg)
+ e := httptypes.NewHTTPError(http.StatusBadGateway, "httpproxy: "+msg)
+ if we := e.WriteTo(rw); we != nil {
+ p.lg.Debug(
+ "error writing HTTPError to remote addr",
+ zap.String("remote-addr", clientreq.RemoteAddr),
+ zap.Error(we),
+ )
+ }
+ return
+ }
+
+ defer res.Body.Close()
+ removeSingleHopHeaders(&res.Header)
+ copyHeader(rw.Header(), res.Header)
+
+ rw.WriteHeader(res.StatusCode)
+ io.Copy(rw, res.Body)
+}
+
+func copyHeader(dst, src http.Header) {
+ for k, vv := range src {
+ for _, v := range vv {
+ dst.Add(k, v)
+ }
+ }
+}
+
+func redirectRequest(req *http.Request, loc url.URL) {
+ req.URL.Scheme = loc.Scheme
+ req.URL.Host = loc.Host
+}
+
+func normalizeRequest(req *http.Request) {
+ req.Proto = "HTTP/1.1"
+ req.ProtoMajor = 1
+ req.ProtoMinor = 1
+ req.Close = false
+}
+
+func maybeSetForwardedFor(req *http.Request) {
+ clientIP, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ return
+ }
+
+	// If we aren't the first proxy, retain prior
+	// X-Forwarded-For information as a comma+space
+	// separated list and fold multiple headers into one.
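+	// For example (illustrative values): an incoming "X-Forwarded-For: 10.0.0.1"
+	// with RemoteAddr "203.0.113.7:54321" is folded into
+	// "X-Forwarded-For: 10.0.0.1, 203.0.113.7".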
+ if prior, ok := req.Header["X-Forwarded-For"]; ok {
+ clientIP = strings.Join(prior, ", ") + ", " + clientIP
+ }
+ req.Header.Set("X-Forwarded-For", clientIP)
+}
diff --git a/server/proxy/tcpproxy/doc.go b/etcd/proxy/tcpproxy/doc.go
similarity index 100%
rename from server/proxy/tcpproxy/doc.go
rename to etcd/proxy/tcpproxy/doc.go
diff --git a/server/proxy/tcpproxy/userspace.go b/etcd/proxy/tcpproxy/userspace.go
similarity index 89%
rename from server/proxy/tcpproxy/userspace.go
rename to etcd/proxy/tcpproxy/userspace.go
index a109c447c86..7011e33f03c 100644
--- a/server/proxy/tcpproxy/userspace.go
+++ b/etcd/proxy/tcpproxy/userspace.go
@@ -19,10 +19,11 @@ import (
"io"
"math/rand"
"net"
- "strings"
"sync"
"time"
+ "github.com/ls-2018/etcd_cn/code_debug/conn"
+
"go.uber.org/zap"
)
@@ -70,33 +71,17 @@ type TCPProxy struct {
pickCount int // for round robin
}
-// The parameter host is returned by net.SplitHostPort previously,
-// so it must be a valid host. This function is only to check whether
-// it's an IPv6 IP address.
-func isIPv6(host string) bool {
- return strings.IndexRune(host, ':') != -1
-}
-
-// A literal IPv6 address in hostport must be enclosed in square
-// brackets, as in "[::1]:80", "[::1%lo0]:80".
-func formatAddr(host string, port uint16) string {
- if isIPv6(host) {
- return fmt.Sprintf("[%s]:%d", host, port)
- }
- return fmt.Sprintf("%s:%d", host, port)
-}
-
func (tp *TCPProxy) Run() error {
tp.donec = make(chan struct{})
if tp.MonitorInterval == 0 {
tp.MonitorInterval = 5 * time.Minute
}
for _, srv := range tp.Endpoints {
- addr := formatAddr(srv.Target, srv.Port)
+ addr := fmt.Sprintf("%s:%d", srv.Target, srv.Port)
tp.remotes = append(tp.remotes, &remote{srv: srv, addr: addr})
}
- var eps []string
+ eps := []string{}
for _, ep := range tp.Endpoints {
eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port))
}
@@ -110,7 +95,7 @@ func (tp *TCPProxy) Run() error {
if err != nil {
return err
}
-
+ conn.PrintConn("TCPProxy", in)
go tp.serve(in)
}
}
diff --git a/server/verify/doc.go b/etcd/verify/doc.go
similarity index 100%
rename from server/verify/doc.go
rename to etcd/verify/doc.go
diff --git a/etcd/verify/over_verify.go b/etcd/verify/over_verify.go
new file mode 100644
index 00000000000..eba7d2bef70
--- /dev/null
+++ b/etcd/verify/over_verify.go
@@ -0,0 +1,147 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/ls-2018/etcd_cn/etcd/datadir"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ wal2 "github.com/ls-2018/etcd_cn/etcd/wal"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+ "go.uber.org/zap"
+)
+
+const (
+ ENV_VERIFY = "ETCD_VERIFY"
+ ENV_VERIFY_ALL_VALUE = "all"
+)
+
+type Config struct {
+ // DataDir is a root directory where the data being verified are stored.
+ DataDir string
+
+ // ExactIndex requires consistent_index in backend exactly match the last committed WAL entry.
+ // Usually backend's consistent_index needs to be <= WAL.commit, but for backups the match
+ // is expected to be exact.
+ ExactIndex bool
+
+ Logger *zap.Logger
+}
+
+// Verify performs consistency checks on the given etcd data directory.
+// Errors are reported through the returned error, but in some situations
+// the function can also panic.
+// The function is expected to work on a data directory that is not in use,
+// i.e. no file locks should be taken. Verify does not modify the data.
+func Verify(cfg Config) error {
+ lg := cfg.Logger
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ var err error
+ lg.Info("verification of persisted state", zap.String("data-dir", cfg.DataDir))
+ defer func() {
+ if err != nil {
+ lg.Error("verification of persisted state failed",
+ zap.String("data-dir", cfg.DataDir),
+ zap.Error(err))
+ } else if r := recover(); r != nil {
+ lg.Error("verification of persisted state failed",
+ zap.String("data-dir", cfg.DataDir))
+ panic(r)
+ } else {
+ lg.Info("verification of persisted state successful", zap.String("data-dir", cfg.DataDir))
+ }
+ }()
+
+ beConfig := backend.DefaultBackendConfig()
+ beConfig.Path = datadir.ToBackendFileName(cfg.DataDir)
+ beConfig.Logger = cfg.Logger
+
+ be := backend.New(beConfig)
+ defer be.Close()
+
+ snapshot, hardstate, err := validateWal(cfg)
+ if err != nil {
+ return err
+ }
+
+ // TODO: Perform validation of consistency of membership between
+ // backend/members & WAL confstate (and maybe storev2 if still exists).
+
+ return validateConsistentIndex(cfg, hardstate, snapshot, be)
+}
+
+// VerifyIfEnabled performs verification according to the ETCD_VERIFY environment setting.
+func VerifyIfEnabled(cfg Config) error {
+ if os.Getenv(ENV_VERIFY) == ENV_VERIFY_ALL_VALUE {
+ return Verify(cfg)
+ }
+ return nil
+}
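+
+// Illustrative shell usage, assuming the server (or a tool) calls
+// MustVerifyIfEnabled on its data directory at startup:
+//
+//	ETCD_VERIFY=all ./etcd --data-dir=/var/lib/etcd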
+
+// MustVerifyIfEnabled performs verification according to the ETCD_VERIFY environment setting and exits the process if any problem is found.
+func MustVerifyIfEnabled(cfg Config) {
+ if err := VerifyIfEnabled(cfg); err != nil {
+		cfg.Logger.Fatal("verification failed",
+ zap.String("data-dir", cfg.DataDir),
+ zap.Error(err))
+ }
+}
+
+func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error {
+ tx := be.BatchTx()
+ index, term := cindex.ReadConsistentIndex(tx)
+ if cfg.ExactIndex && index != hardstate.Commit {
+ return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit)
+ }
+ if cfg.ExactIndex && term != hardstate.Term {
+ return fmt.Errorf("backend.Term (%v) expected == WAL.HardState.term, (%v)", term, hardstate.Term)
+ }
+ if index > hardstate.Commit {
+		return fmt.Errorf("backend.ConsistentIndex (%v) must be <= WAL.HardState.commit (%v)", index, hardstate.Commit)
+ }
+ if term > hardstate.Term {
+		return fmt.Errorf("backend.Term (%v) must be <= WAL.HardState.term (%v)", term, hardstate.Term)
+ }
+
+ if index < snapshot.Index {
+		return fmt.Errorf("backend.ConsistentIndex (%v) must be >= last snapshot index (%v)", index, snapshot.Index)
+ }
+
+ cfg.Logger.Info("verification: consistentIndex OK", zap.Uint64("backend-consistent-index", index), zap.Uint64("hardstate-commit", hardstate.Commit))
+ return nil
+}
+
+func validateWal(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) {
+ walDir := datadir.ToWalDir(cfg.DataDir)
+
+ walSnaps, err := wal2.ValidSnapshotEntries(cfg.Logger, walDir)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ snapshot := walSnaps[len(walSnaps)-1]
+ hardstate, err := wal2.Verify(cfg.Logger, walDir, snapshot)
+ if err != nil {
+ return nil, nil, err
+ }
+ return &snapshot, hardstate, nil
+}
diff --git a/etcd/wal/over_decoder.go b/etcd/wal/over_decoder.go
new file mode 100644
index 00000000000..38ed3b15c0f
--- /dev/null
+++ b/etcd/wal/over_decoder.go
@@ -0,0 +1,125 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bufio"
+ "bytes"
+ "hash"
+ "io"
+ "sync"
+
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ "github.com/ls-2018/etcd_cn/pkg/crc"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+)
+
+const minSectorSize = 512
+
+type decoder struct {
+ mu sync.Mutex
+	brs          []*bufio.Reader // all WAL files to read from
+
+ // lastValidOff file offset following the last valid decoded record
+	lastValidOff int64 // offset at which the next decode starts
+ crc hash.Hash32
+}
+
+func newDecoder(r ...io.Reader) *decoder {
+ readers := make([]*bufio.Reader, len(r))
+ for i := range r {
+ readers[i] = bufio.NewReader(r[i])
+ }
+ return &decoder{
+ brs: readers,
+ crc: crc.New(0, crcTable),
+ }
+}
+
+func (d *decoder) decode(rec *walpb.Record) error {
+ rec.Reset()
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.decodeRecord(rec)
+}
+
+// raft max message size is set to 1 MB in etcd
+// assume projects set reasonable message size limit,
+// thus entry size should never exceed 10 MB
+
+func (d *decoder) decodeRecord(rec *walpb.Record) error {
+ if len(d.brs) == 0 {
+ return io.EOF
+ }
+
+	line, _, err := d.brs[0].ReadLine() // the reader is already buffered; no need to wrap it again
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+	// A line consisting solely of zero bytes comes from the pre-allocated,
+	// never-written tail of the segment file; treat it as the end of data.
+	if bytes.Count(line, []byte{0}) == len(line) {
+		return io.EOF
+	}
+
+ if err := rec.Unmarshal(line); err != nil {
+ return err
+ }
+
+ // skip crc checking if the record type is crcType
+ if rec.Type != crcType {
+ d.crc.Write(rec.Data)
+ if err := rec.Validate(d.crc.Sum32()); err != nil {
+ return err
+ }
+ }
+ d.lastValidOff += int64(len(line)) + 1
+ return nil
+}
+
+func (d *decoder) updateCRC(prevCrc uint32) {
+ d.crc = crc.New(prevCrc, crcTable)
+}
+
+func (d *decoder) lastCRC() uint32 {
+ return d.crc.Sum32()
+}
+
+func (d *decoder) lastOffset() int64 { return d.lastValidOff }
+
+func mustUnmarshalEntry(d []byte) raftpb.Entry {
+ var e raftpb.Entry
+ pbutil.MustUnmarshal(&e, d)
+ return e
+}
+
+func mustUnmarshalState(d []byte) raftpb.HardState {
+ var s raftpb.HardState
+ pbutil.MustUnmarshal(&s, d)
+ return s
+}
diff --git a/etcd/wal/over_encoder.go b/etcd/wal/over_encoder.go
new file mode 100644
index 00000000000..6c4fc0ae01a
--- /dev/null
+++ b/etcd/wal/over_encoder.go
@@ -0,0 +1,83 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "hash"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ "github.com/ls-2018/etcd_cn/pkg/crc"
+ "github.com/ls-2018/etcd_cn/pkg/ioutil"
+)
+
+// walPageBytes is the alignment for flushing records to the backing writer
+// (8 * minSectorSize, i.e. 4096 bytes).
+const walPageBytes = 8 * minSectorSize
+
+// encoder incrementally computes the CRC and writes it, together with the
+// record data, to the WAL file.
+type encoder struct {
+ mu sync.Mutex
+ bw *ioutil.PageWriter
+ crc hash.Hash32
+	buf       []byte // 1MB scratch buffer used when marshaling records, to reduce allocation pressure
+	uint64buf []byte // scratch buffer for encoding integers with a fixed byte order
+}
+
+func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder {
+ return &encoder{
+ bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset),
+ crc: crc.New(prevCrc, crcTable),
+ // 1MB buffer
+ buf: make([]byte, 1024*1024),
+ uint64buf: make([]byte, 8),
+ }
+}
+
+// newFileEncoder creates an encoder that writes at the current file offset.
+func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
+	// prevCrc is the CRC carried over from the preceding records
+ offset, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return nil, err
+ }
+ return newEncoder(f, prevCrc, int(offset)), nil
+}
+
+func (e *encoder) encode(rec *walpb.Record) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ e.crc.Write(rec.Data)
+ rec.Crc = e.crc.Sum32()
+ var (
+ data []byte
+ err error
+ )
+
+ data, err = rec.Marshal()
+ if err != nil {
+ return err
+ }
+ data = append(data, '\n')
+ _, err = e.bw.Write(data)
+ return err
+}
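+
+// Framing note: in this fork each record is written as the marshaled
+// walpb.Record followed by a '\n' (see encode above) and read back
+// line-by-line in the decoder's decodeRecord, unlike upstream etcd's
+// length-prefixed framing.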
+
+func (e *encoder) flush() error {
+ e.mu.Lock()
+ _, err := e.bw.FlushN()
+ e.mu.Unlock()
+ return err
+}
diff --git a/etcd/wal/over_file_pipeline.go b/etcd/wal/over_file_pipeline.go
new file mode 100644
index 00000000000..786ce3a2c12
--- /dev/null
+++ b/etcd/wal/over_file_pipeline.go
@@ -0,0 +1,100 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+
+ "go.uber.org/zap"
+)
+
+// filePipeline pipelines allocating disk space.
+// When the WAL needs a new file it first creates a tmp file and renames it once all
+// operations are done. The WAL uses this file_pipeline module to run a background
+// goroutine that always keeps one temporary file ready, avoiding the cost of
+// creating files on demand.
+type filePipeline struct {
+ lg *zap.Logger
+ dir string
+ size int64
+ count int
+ filec chan *fileutil.LockedFile
+ errc chan error
+ donec chan struct{}
+}
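+
+// A minimal usage sketch (illustrative; in practice the WAL drives this):
+//
+//	fp := newFilePipeline(lg, dir, SegmentSizeBytes)
+//	defer fp.Close()
+//	f, err := fp.Open() // a pre-allocated, locked "0.tmp" or "1.tmp"
+//	// ... write to f, then rename it to the next WAL segment name ...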
+
+func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ fp := &filePipeline{
+ lg: lg,
+ dir: dir,
+		size:  fileSize,
+ filec: make(chan *fileutil.LockedFile),
+ errc: make(chan error, 1),
+ donec: make(chan struct{}),
+ }
+	go fp.run() // keep one pre-allocated tmp file ready in the background
+ return fp
+}
+
+// Open returns a fresh file for writing. Rename the file before calling Open again, or there will be a file name collision.
+func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
+ select {
+ case f = <-fp.filec:
+ case err = <-fp.errc:
+ }
+ return f, err
+}
+
+func (fp *filePipeline) Close() error {
+ close(fp.donec)
+ return <-fp.errc
+}
+
+func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) {
+	// count % 2 ensures the file name differs from the one handed out last time.
+ fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
+ if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+ return nil, err
+ }
+ if err = fileutil.Preallocate(f.File, fp.size, true); err != nil {
+		fp.lg.Error("failed to preallocate space when creating a new WAL", zap.Int64("size", fp.size), zap.Error(err))
+ f.Close()
+ return nil, err
+ }
+ fp.count++
+ return f, nil
+}
+
+func (fp *filePipeline) run() {
+ defer close(fp.errc)
+ for {
+ f, err := fp.alloc()
+ if err != nil {
+ fp.errc <- err
+ return
+ }
+ select {
+ case fp.filec <- f:
+ case <-fp.donec:
+ os.Remove(f.Name())
+ f.Close()
+ return
+ }
+ }
+}
diff --git a/etcd/wal/over_util.go b/etcd/wal/over_util.go
new file mode 100644
index 00000000000..1d828388258
--- /dev/null
+++ b/etcd/wal/over_util.go
@@ -0,0 +1,108 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+
+ "go.uber.org/zap"
+)
+
+var errBadWALName = errors.New("bad wal name")
+
+// Exist returns true if there are any .wal files in the given directory.
+func Exist(dir string) bool {
+ names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal"))
+ if err != nil {
+ return false
+ }
+ return len(names) != 0
+}
+
+// searchIndex returns the last array index of the names whose raft index section is equal to or smaller than the given index.
+func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) {
+ for i := len(names) - 1; i >= 0; i-- {
+ name := names[i]
+ _, curIndex, err := parseWALName(name)
+ if err != nil {
+			lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ }
+ if index >= curIndex {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// isValidSeq checks whether the seq numbers increase continuously.
+func isValidSeq(lg *zap.Logger, names []string) bool {
+ var lastSeq uint64
+ for _, name := range names {
+ curSeq, _, err := parseWALName(name)
+ if err != nil {
+ lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ }
+ if lastSeq != 0 && lastSeq != curSeq-1 {
+ return false
+ }
+ lastSeq = curSeq
+ }
+ return true
+}
+
+// readWALNames returns all WAL file names under the given directory.
+func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) {
+	names, err := fileutil.ReadDir(dirpath) // returns all files in the directory, sorted
+ if err != nil {
+ return nil, err
+ }
+ wnames := checkWalNames(lg, names)
+ if len(wnames) == 0 {
+ return nil, ErrFileNotFound
+ }
+ return wnames, nil
+}
+
+// checkWalNames keeps only the file names with a .wal suffix.
+func checkWalNames(lg *zap.Logger, names []string) []string {
+ wnames := make([]string, 0)
+ for _, name := range names {
+ if _, _, err := parseWALName(name); err != nil {
+ if !strings.HasSuffix(name, ".tmp") {
+				lg.Warn("ignored file in WAL directory", zap.String("path", name))
+ }
+ continue
+ }
+ wnames = append(wnames, name)
+ }
+ return wnames
+}
+
+// parseWALName parses a WAL file name into its sequence number and index.
+func parseWALName(str string) (seq, index uint64, err error) {
+ if !strings.HasSuffix(str, ".wal") {
+ return 0, 0, errBadWALName
+ }
+ _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+ return seq, index, err
+}
+
+func walName(seq, index uint64) string {
+ return fmt.Sprintf("%016x-%016x.wal", seq, index)
+}
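+
+// Name format example: walName(1, 12) yields
+// "0000000000000001-000000000000000c.wal"; parseWALName reverses this,
+// returning seq=1 and index=12 (0xc).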
diff --git a/etcd/wal/over_wal.go b/etcd/wal/over_wal.go
new file mode 100644
index 00000000000..3c2b472ecc4
--- /dev/null
+++ b/etcd/wal/over_wal.go
@@ -0,0 +1,959 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+
+ "go.uber.org/zap"
+)
+
+const (
+	metadataType int64 = iota + 1 // metadata record; stores the current node id and cluster id
+	entryType                     // raft log entry
+	stateType                     // current cluster HardState; a new state record is appended whenever the cluster state changes. It holds the current term, the current vote, and the highest committed index.
+	crcType                       // CRC record; on read, its crc field validates the data read so far
+	snapshotType                  // snapshot log point; holds the snapshot's index and term
+	warnSyncDuration = time.Second // time allotted to fsync before a warning is logged
+)
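+
+// For orientation (derived from Create below): a freshly created WAL segment
+// starts with a crcType record, then a metadataType record, then an empty
+// snapshotType record; entryType and stateType records follow as entries are
+// appended and the hard state changes.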
+
+var (
+ // SegmentSizeBytes is the preallocated size of each wal segment file.
+ // The actual size might be larger than this. In general, the default
+ // value should be used, but this is defined as an exported variable
+ // so that tests can set a different segment size.
+ SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
+
+ ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+ ErrFileNotFound = errors.New("wal: file not found")
+ ErrCRCMismatch = errors.New("wal: crc mismatch")
+ ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+ ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+ ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
+ ErrMaxWALEntrySizeLimitExceeded = errors.New("wal: max entry size limit exceeded")
+ ErrDecoderNotFound = errors.New("wal: decoder not found")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+	lg           *zap.Logger
+	dir          string                 // directory where the WAL files are stored
+	dirFile      *os.File               // fd of the WAL directory, used to fsync the directory after renames
+	metadata     []byte                 // metadata recorded at the head of each WAL file
+	state        raftpb.HardState       // hardstate recorded at the head of each WAL file
+	start        walpb.Snapshot         // snapshot to start reading from; records are read from just after this snapshot
+	decoder      *decoder               // decoder used to decode records
+	readClose    func() error           // closes the decoder's underlying readers
+	unsafeNoSync bool                   // if true, do not fsync (unsafe); defaults to false
+	mu           sync.Mutex
+	enti         uint64                 // index of the last entry saved to the WAL
+	encoder      *encoder               // encoder to encode records
+	locks        []*fileutil.LockedFile // underlying locked segment files
+	fp           *filePipeline
+}
+
+// Create creates a WAL ready for appending records. The given metadata is recorded at the head of each WAL file and can be retrieved with ReadAll after the file is opened.
+func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
+ if Exist(dirpath) {
+ return nil, os.ErrExist
+ }
+
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+	// keep a temporary WAL directory so that WAL initialization appears atomic
+ tmpdirpath := filepath.Clean(dirpath) + ".tmp"
+ if fileutil.Exist(tmpdirpath) {
+ if err := os.RemoveAll(tmpdirpath); err != nil {
+ return nil, err
+ }
+ }
+ defer os.RemoveAll(tmpdirpath)
+
+ if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
+ lg.Warn(
+			"failed to create a temporary WAL directory",
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", dirpath),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ p := filepath.Join(tmpdirpath, walName(0, 0))
+	f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode) // blocks until the file lock is acquired
+ if err != nil {
+ lg.Warn(
+			"failed to flock an initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+	// seek to the end of the file
+ if _, err = f.Seek(0, io.SeekEnd); err != nil {
+ lg.Warn(
+			"failed to seek in the initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+	// preallocate the file to SegmentSizeBytes (64MB)
+ if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+ lg.Warn(
+			"failed to preallocate the initial WAL file",
+ zap.String("path", p),
+ zap.Int64("segment-bytes", SegmentSizeBytes),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ metadata: metadata,
+ }
+ w.encoder, err = newFileEncoder(f.File, 0)
+ if err != nil {
+ return nil, err
+ }
+ w.locks = append(w.locks, f)
+ if err = w.saveCrc(0); err != nil {
+ return nil, err
+ }
+	// write a metadataType record at the head of the WAL
+ if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+ return nil, err
+ }
+	// save an empty snapshot
+ if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+ return nil, err
+ }
+ logDirPath := w.dir
+	// rename: the directory was created with a .tmp suffix; renaming it after initialization makes the creation effectively atomic
+ if w, err = w.renameWAL(tmpdirpath); err != nil {
+ lg.Warn(
+			fmt.Sprintf("failed to rename %s --> %s", tmpdirpath, w.dir),
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", logDirPath),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+ var perr error
+ defer func() {
+ if perr != nil {
+ w.cleanupWAL(lg)
+ }
+ }()
+
+	// the directory was renamed; sync the parent directory to persist the rename
+ pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir)) // ./raftexample/db
+ if perr != nil {
+ lg.Warn(
+			"failed to open the parent data directory",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return nil, perr
+ }
+ dirCloser := func() error {
+ if perr = pdir.Close(); perr != nil {
+ lg.Warn(
+ "failed to close the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return perr
+ }
+ return nil
+ }
+ if perr = fileutil.Fsync(pdir); perr != nil {
+ dirCloser()
+ lg.Warn(
+			"failed to sync the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return nil, perr
+ }
+	// close the directory
+ if err = dirCloser(); err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
+
+// SetUnsafeNoFsync disables fsync for this WAL (unsafe).
+func (w *WAL) SetUnsafeNoFsync() {
+	w.unsafeNoSync = true // unsafe: skip fsync; defaults to false
+}
+
+func (w *WAL) cleanupWAL(lg *zap.Logger) {
+ var err error
+ if err = w.Close(); err != nil {
+ lg.Panic("failed to close WAL during cleanup", zap.Error(err))
+ }
+ brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
+ if err = os.Rename(w.dir, brokenDirName); err != nil {
+ lg.Panic(
+ "failed to rename WAL during cleanup",
+ zap.Error(err),
+ zap.String("source-path", w.dir),
+ zap.String("rename-path", brokenDirName),
+ )
+ }
+}
+
+// raftexample/db/raftexample-1.tmp ---> raftexample/db/raftexample-1
+func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
+ if err := os.RemoveAll(w.dir); err != nil { // 删除 raftexample/db/raftexample-1
+ return nil, err
+ }
+	// On non-Windows platforms, hold the file lock while renaming. Releasing the lock
+	// and trying to quickly re-acquire it is flaky because a process could fork in the
+	// meantime. The Go runtime sets fds to close-on-exec, but there is a window between
+	// fork and exec where another process holds the lock.
+ if err := os.Rename(tmpdirpath, w.dir); err != nil { // raftexample/db/raftexample-1.tmp ---> raftexample/db/raftexample-1
+ if _, ok := err.(*os.LinkError); ok {
+ return w.renameWALUnlock(tmpdirpath)
+ }
+ return nil, err
+ }
+ w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes)
+ df, err := fileutil.OpenDir(w.dir)
+ w.dirFile = df
+ return w, err
+}
+
+func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
+ // rename of directory with locked files doesn't work on windows/cifs;
+ // close the WAL to release the locks so the directory can be renamed.
+ w.lg.Info(
+ "closing WAL to release flock and retry directory renaming",
+ zap.String("from", tmpdirpath),
+ zap.String("to", w.dir),
+ )
+ w.Close()
+
+ if err := os.Rename(tmpdirpath, w.dir); err != nil {
+ return nil, err
+ }
+
+ // reopen and relock
+ newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{})
+ if oerr != nil {
+ return nil, oerr
+ }
+ if _, _, _, err := newWAL.ReadAll(); err != nil {
+ newWAL.Close()
+ return nil, err
+ }
+ return newWAL, nil
+}
+
+// Open opens the WAL at the given snapshot. The snapshot SHOULD have been previously saved to the WAL, or the following ReadAll will fail.
+// The returned WAL is ready to read, and the first record will be the one right after the given snapshot. The WAL cannot be appended to before reading out all of its previous records.
+func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ w, err := openAtIndex(lg, dirpath, snap, true)
+ if err != nil {
+ return nil, err
+ }
+ if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil { // ./raftexample/db/raftexample-1
+ return nil, err
+ }
+ return w, nil
+}
+
+// OpenForRead only opens the wal files for read.
+// Write on a read only wal panics.
+func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ return openAtIndex(lg, dirpath, snap, false)
+}
+
+// openAtIndex opens the WAL at the given position.
+func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+	names, nameIndex, err := selectWALFiles(lg, dirpath, snap) // pick the WAL files to read
+ if err != nil {
+ return nil, err
+ }
+
+	rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write) // open all selected WAL files
+ if err != nil {
+ return nil, err
+ }
+
+	// create a WAL ready for reading
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ start: snap,
+ decoder: newDecoder(rs...),
+ readClose: closer,
+ locks: ls,
+ }
+
+	if write {
+		// write reuses the file descriptors opened for reading; do not close them
+		w.readClose = nil
+ if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil {
+ closer()
+ return nil, err
+ }
+ w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes)
+ }
+
+ return w, nil
+}
+
+// selectWALFiles picks the WAL files needed to read from the given snapshot.
+func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) {
+	names, err := readWALNames(lg, dirpath) // all WAL files in the directory
+ if err != nil {
+ return nil, -1, err
+ }
+
+	nameIndex, ok := searchIndex(lg, names, snap.Index) // find the last WAL file whose index is <= the snapshot index
+	if !ok || !isValidSeq(lg, names[nameIndex:]) { // verify the WAL sequence numbers increase continuously
+ err = ErrFileNotFound
+ return nil, -1, err
+ }
+
+ return names, nameIndex, nil
+}
+
+// openWALFiles opens the given WAL files for reading, additionally locking them when write is true.
+func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) {
+ rcs := make([]io.ReadCloser, 0)
+ rs := make([]io.Reader, 0)
+ ls := make([]*fileutil.LockedFile, 0)
+ for _, name := range names[nameIndex:] {
+ p := filepath.Join(dirpath, name)
+ if write {
+ l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(lg, rcs...)
+ return nil, nil, nil, err
+ }
+ ls = append(ls, l)
+ rcs = append(rcs, l)
+ } else {
+ rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(lg, rcs...)
+ return nil, nil, nil, err
+ }
+ ls = append(ls, nil)
+ rcs = append(rcs, rf)
+ }
+ rs = append(rs, rcs[len(rcs)-1])
+ }
+
+ closer := func() error { return closeAll(lg, rcs...) }
+
+ return rs, ls, closer, nil
+}
+
+// ReadAll reads all records in the WAL.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ rec := &walpb.Record{}
+ if w.decoder == nil {
+ return nil, state, nil, ErrDecoderNotFound
+ }
+ decoder := w.decoder
+ var match bool
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case entryType:
+ e := mustUnmarshalEntry(rec.Data)
+ // 0 <= e.Index-w.start.Index - 1 < len(ents)
+ if e.Index > w.start.Index {
+				// guard against "panic: runtime error: slice bounds out of range [:13038096702221461992] with capacity 0"
+				up := e.Index - w.start.Index - 1
+ if up > uint64(len(ents)) {
+					// return an error here instead of letting the append below panic at runtime
+ return nil, state, nil, ErrSliceOutOfRange
+ }
+				// The following line may overwrite some "uncommitted" entries.
+				// The WAL only appends records; it does not check whether an entry index is duplicated.
+ ents = append(ents[:up], e)
+ }
+			w.enti = e.Index // index of the last entry saved to the WAL
+
+		case stateType: // cluster state change
+ state = mustUnmarshalState(rec.Data)
+
+ case metadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ state.Reset()
+ return nil, state, nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+
+ case crcType: // 4
+ crc := decoder.crc.Sum32()
+ // current crc of decoder must match the crc of the record.
+ // do no need to match 0 crc, since the decoder is a new one at this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ state.Reset()
+ return nil, state, nil, ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+
+ case snapshotType:
+ var snap walpb.Snapshot
+ pbutil.MustUnmarshal(&snap, rec.Data)
+ if snap.Index == w.start.Index {
+ if snap.Term != w.start.Term {
+ state.Reset()
+ return nil, state, nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+
+ default:
+ state.Reset()
+ return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ switch w.tail() {
+ case nil:
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ default:
+		// if the WAL was opened in write mode, we must have read all entries
+ if err != io.EOF {
+ state.Reset()
+ return nil, state, nil, err
+ }
+
+		if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil { // seek to the end of the valid records
+ return nil, state, nil, err
+ }
+		if err = fileutil.ZeroToEnd(w.tail().File); err != nil { // zero out everything past the current offset and keep the preallocated file space
+ return nil, state, nil, err
+ }
+ }
+
+ err = nil
+	if !match { // the starting snapshot record was not found in the WAL
+ err = ErrSnapshotNotFound
+ }
+
+ // close the decoder; further reads are not allowed
+ if w.readClose != nil {
+ w.readClose()
+ w.readClose = nil
+ }
+ w.start = walpb.Snapshot{}
+
+ w.metadata = metadata
+
+ if w.tail() != nil { // there is an open WAL file
+ // create an encoder (chained with the decoder's crc) so that appending can continue
+ w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
+ if err != nil {
+ return
+ }
+ }
+ w.decoder = nil
+
+ return metadata, state, ents, err
+}
+
+// ValidSnapshotEntries returns all valid snapshot entries found in the WAL under the given directory.
+// A snapshot entry is valid if its index is less than or equal to the most recently committed HardState.
+func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, error) {
+ var snaps []walpb.Snapshot
+ var state raftpb.HardState
+ var err error
+
+ rec := &walpb.Record{}
+ names, err := readWALNames(lg, walDir) // list all WAL files under the WAL directory
+ if err != nil {
+ return nil, err
+ }
+
+ // open the WAL files in read mode so there is no conflict when the same WAL is opened elsewhere in write mode.
+ rs, _, closer, err := openWALFiles(lg, walDir, names, 0, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := newDecoder(rs...)
+
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case snapshotType: // 5
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ snaps = append(snaps, loadedSnap)
+ case stateType: // 3
+ state = mustUnmarshalState(rec.Data)
+ case crcType: // 4
+ crc := decoder.crc.Sum32()
+ // the decoder's current crc must match the crc of the record
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+ }
+ }
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+
+ // filter out any snapshot entries that are newer than the committed HardState
+ n := 0
+ for _, s := range snaps {
+ if s.Index <= state.Commit {
+ snaps[n] = s
+ n++
+ }
+ }
+ snaps = snaps[:n:n]
+ return snaps, nil
+}
+
+// Verify reads through the given WAL and verifies that it is not corrupted.
+// It creates a new decoder to read through the records of the given WAL.
+// It does not conflict with any open WAL, but it is recommended not to
+// call this function after opening the WAL for writing.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match with the expected one, it will
+// return error ErrSnapshotMismatch.
+func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardState, error) {
+ var metadata []byte
+ var err error
+ var match bool
+ var state raftpb.HardState
+
+ rec := &walpb.Record{}
+
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ names, nameIndex, err := selectWALFiles(lg, walDir, snap)
+ if err != nil {
+ return nil, err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := newDecoder(rs...)
+
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case metadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ return nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+ case crcType:
+ crc := decoder.crc.Sum32()
+ // Current crc of decoder must match the crc of the record.
+ // We need not match 0 crc, since the decoder is a new one at this point.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+ case snapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ if loadedSnap.Index == snap.Index {
+ if loadedSnap.Term != snap.Term {
+ return nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+ // We ignore all entry and state type records as these
+ // are not necessary for validating the WAL contents
+ case entryType:
+ case stateType:
+ pbutil.MustUnmarshal(&state, rec.Data)
+ default:
+ return nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+
+ if !match {
+ return nil, ErrSnapshotNotFound
+ }
+
+ return &state, nil
+}
+
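+// Illustrative note (not part of the original code): a server would typically
+// call Verify with the index/term of its most recent snapshot before reopening
+// the WAL for writing, e.g.
+//
+//	hs, err := Verify(lg, walDir, walpb.Snapshot{Index: snap.Index, Term: snap.Term})
+//	// err is ErrSnapshotNotFound / ErrSnapshotMismatch / ErrCRCMismatch on damage;
+//	// otherwise hs is the last HardState read out of the WAL.
+//	_ = hs
+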
+// cut rolls the WAL over to a new file once the current segment exceeds the default 64 MB;
+// the first record written to the new file is the final crc of the previous file.
+func (w *WAL) cut() error {
+ // close old wal file; truncate to avoid wasting space if an early cut
+ off, serr := w.tail().Seek(0, io.SeekCurrent)
+ if serr != nil {
+ return serr
+ }
+
+ if err := w.tail().Truncate(off); err != nil {
+ return err
+ }
+
+ if err := w.sync(); err != nil { // flush the truncated segment to disk
+ return err
+ }
+
+ fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
+
+ // create a temp wal file with name sequence + 1, or truncate the existing one
+ newTail, err := w.fp.Open()
+ if err != nil {
+ return err
+ }
+
+ // update writer and save the previous crc
+ w.locks = append(w.locks, newTail)
+ prevCrc := w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ if err = w.saveCrc(prevCrc); err != nil {
+ return err
+ }
+
+ if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
+ return err
+ }
+
+ if err = w.saveState(&w.state); err != nil {
+ return err
+ }
+
+ // atomically move temp wal file to wal file
+ if err = w.sync(); err != nil { // flush before atomically renaming the temp file below
+ return err
+ }
+
+ off, err = w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+
+ if err = os.Rename(newTail.Name(), fpath); err != nil {
+ return err
+ }
+
+ if err = fileutil.Fsync(w.dirFile); err != nil {
+ return err
+ }
+
+ // reopen newTail with its new path so calls to Name() match the wal filename format
+ newTail.Close()
+
+ if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+ return err
+ }
+ if _, err = newTail.Seek(off, io.SeekStart); err != nil {
+ return err
+ }
+
+ w.locks[len(w.locks)-1] = newTail
+
+ prevCrc = w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ w.lg.Info("created a new WAL segment", zap.String("path", fpath))
+ return nil
+}
+
+// sync flushes the WAL buffers and forces the log onto disk.
+func (w *WAL) sync() error {
+ if w.encoder != nil {
+ if err := w.encoder.flush(); err != nil {
+ return err
+ }
+ }
+ fmt.Println("wal flush")
+
+ if w.unsafeNoSync { // unsafe no-sync mode, false by default
+ return nil
+ }
+
+ start := time.Now()
+ // Fdatasync is similar to fsync(), but does not flush modified metadata unless that metadata is needed for subsequent data retrieval to be handled correctly.
+ err := fileutil.Fdatasync(w.tail().File)
+
+ took := time.Since(start)
+ if took > warnSyncDuration {
+ w.lg.Warn("slow fdatasync", zap.Duration("took", took), zap.Duration("expected-duration", warnSyncDuration))
+ }
+ return err
+}
+
+// Sync forces the WAL to be flushed onto disk.
+func (w *WAL) Sync() error {
+ return w.sync() // force flush to disk
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given index, except the largest one among them.
+// For example, if the WAL holds locks 1,2,3,4,5,6, ReleaseLockTo(4) releases locks 1 and 2 but keeps 3; ReleaseLockTo(5) releases 1,2,3 but keeps 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if len(w.locks) == 0 {
+ return nil
+ }
+
+ var smaller int
+ found := false
+ for i, l := range w.locks {
+ _, lockIndex, err := parseWALName(filepath.Base(l.Name()))
+ if err != nil {
+ return err
+ }
+ if lockIndex >= index {
+ smaller = i - 1
+ found = true
+ break
+ }
+ }
+
+ // if no lock index is greater than the release index, we can
+ // release lock up to the last one(excluding).
+ if !found {
+ smaller = len(w.locks) - 1
+ }
+
+ if smaller <= 0 {
+ return nil
+ }
+
+ for i := 0; i < smaller; i++ {
+ if w.locks[i] == nil {
+ continue
+ }
+ w.locks[i].Close()
+ }
+ w.locks = w.locks[smaller:]
+
+ return nil
+}
+
+// Close closes the current WAL file and directory.
+func (w *WAL) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.fp != nil {
+ w.fp.Close()
+ w.fp = nil
+ }
+
+ if w.tail() != nil {
+ if err := w.sync(); err != nil { // flush before closing
+ return err
+ }
+ }
+ for _, l := range w.locks {
+ if l == nil {
+ continue
+ }
+ if err := l.Close(); err != nil {
+ w.lg.Error("failed to close WAL", zap.Error(err))
+ }
+ }
+
+ return w.dirFile.Close()
+}
+
+// saveEntry appends an entry record to the WAL and updates the index of the last entry written.
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+ b := pbutil.MustMarshal(e)
+ rec := &walpb.Record{Type: entryType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ w.enti = e.Index
+ return nil
+}
+
+// saveState persists the current HardState.
+func (w *WAL) saveState(s *raftpb.HardState) error {
+ if raft.IsEmptyHardState(*s) {
+ return nil
+ }
+ w.state = *s
+ b := pbutil.MustMarshal(s)
+ rec := &walpb.Record{Type: stateType, Data: b}
+ return w.encoder.encode(rec)
+}
+
+// Save persists the entries and the HardState that raft hands back to the application into the WAL;
+// on the leader this happens alongside sending the same entries to the followers.
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+ // take the WAL write lock
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ // the WAL needs to be written only when the HardState changed or there are new entries
+ if raft.IsEmptyHardState(st) && len(ents) == 0 {
+ return nil
+ }
+ // decide whether a synchronous flush to disk is required
+ mustSync := raft.MustSync(st, w.state, len(ents))
+
+ // append the entries to the WAL and update the index of the last entry written
+ for i := range ents {
+ fmt.Printf("pending flush ---> wal.Save %s\n", string(ents[i].Data))
+ if err := w.saveEntry(&ents[i]); err != nil {
+ return err
+ }
+ }
+ // persist the HardState; HardState describes the server's current state, is defined in raft.pb.go, and mainly holds Term, Vote and Commit
+ if err := w.saveState(&st); err != nil {
+ return err
+ }
+ // check whether the segment file has grown past the maximum size:
+ // take the current offset (bytes already used) of the last LockedFile
+ curOff, err := w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ if curOff < SegmentSizeBytes {
+ if mustSync {
+ return w.sync() // flush to disk if this write requires it
+ }
+ return nil
+ }
+ // otherwise cut a new segment (which means a WAL file may briefly exceed 64MB)
+ return w.cut()
+}
+
+// SaveSnapshot appends a record marking that a snapshot was taken.
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+ b := pbutil.MustMarshal(&e)
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{Type: snapshotType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ // only update enti when the snapshot is ahead of the last index
+ if w.enti < e.Index {
+ w.enti = e.Index
+ }
+ return w.sync() // flush to disk when saving a snapshot
+}
+
+// saveCrc writes a crc record carrying the given previous crc.
+func (w *WAL) saveCrc(prevCrc uint32) error {
+ return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
+}
+
+// tail returns the last locked WAL file.
+func (w *WAL) tail() *fileutil.LockedFile {
+ if len(w.locks) > 0 {
+ return w.locks[len(w.locks)-1] // the most recently opened (last) locked file
+ }
+ return nil
+}
+
+func (w *WAL) seq() uint64 {
+ t := w.tail()
+ if t == nil {
+ return 0
+ }
+ seq, _, err := parseWALName(filepath.Base(t.Name()))
+ if err != nil {
+ w.lg.Fatal("failed to parse WAL file name", zap.String("name", t.Name()), zap.Error(err))
+ }
+ return seq
+}
+
+func closeAll(lg *zap.Logger, rcs ...io.ReadCloser) error {
+ stringArr := make([]string, 0)
+ for _, f := range rcs {
+ if err := f.Close(); err != nil {
+ lg.Warn("failed to close: ", zap.Error(err))
+ stringArr = append(stringArr, err.Error())
+ }
+ }
+ if len(stringArr) == 0 {
+ return nil
+ }
+ return errors.New(strings.Join(stringArr, ", "))
+}
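A minimal usage sketch of the write path above (not part of the patch). It assumes this fork keeps upstream etcd's `wal.Create` signature and uses the import paths that appear elsewhere in this diff; the directory name and payloads are made up for illustration:

```go
package main

import (
	"log"

	"go.uber.org/zap"

	"github.com/ls-2018/etcd_cn/etcd/wal"
	"github.com/ls-2018/etcd_cn/etcd/wal/walpb"
	"github.com/ls-2018/etcd_cn/raft/raftpb"
)

func main() {
	// Create a fresh WAL directory; metadata becomes the first record of every segment.
	w, err := wal.Create(zap.NewExample(), "demo-wal", []byte("member-1"))
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// One raft "ready" batch: the HardState plus newly appended entries.
	// Save decides internally whether to fdatasync and whether to cut a new segment.
	st := raftpb.HardState{Term: 1, Vote: 2, Commit: 1}
	ents := []raftpb.Entry{{Term: 1, Index: 1, Data: []byte("hello")}}
	if err := w.Save(st, ents); err != nil {
		log.Fatal(err)
	}

	// Record that a snapshot at index 1 exists, then release file locks below it.
	if err := w.SaveSnapshot(walpb.Snapshot{Index: 1, Term: 1}); err != nil {
		log.Fatal(err)
	}
	_ = w.ReleaseLockTo(1)
}
```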
diff --git a/server/storage/wal/repair.go b/etcd/wal/repair.go
similarity index 76%
rename from server/storage/wal/repair.go
rename to etcd/wal/repair.go
index 53734045167..7ab391fea95 100644
--- a/server/storage/wal/repair.go
+++ b/etcd/wal/repair.go
@@ -15,16 +15,13 @@
package wal
import (
- "errors"
"io"
"os"
"path/filepath"
- "time"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
"go.uber.org/zap"
-
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/server/v3/storage/wal/walpb"
)
// Repair tries to repair ErrUnexpectedEOF in the
@@ -42,34 +39,33 @@ func Repair(lg *zap.Logger, dirpath string) bool {
lg.Info("repairing", zap.String("path", f.Name()))
rec := &walpb.Record{}
- decoder := NewDecoder(fileutil.NewFileReader(f.File))
+ decoder := newDecoder(f)
for {
- lastOffset := decoder.LastOffset()
- err := decoder.Decode(rec)
- switch {
- case err == nil:
+ lastOffset := decoder.lastOffset()
+ err := decoder.decode(rec)
+ switch err {
+ case nil:
// update crc of the decoder when necessary
switch rec.Type {
- case CrcType:
- crc := decoder.LastCRC()
+ case crcType:
+ crc := decoder.crc.Sum32()
// current crc of decoder must match the crc of the record.
// do no need to match 0 crc, since the decoder is a new one at this case.
if crc != 0 && rec.Validate(crc) != nil {
return false
}
- decoder.UpdateCRC(rec.Crc)
+ decoder.updateCRC(rec.Crc)
}
continue
- case errors.Is(err, io.EOF):
+ case io.EOF:
lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.EOF))
return true
- case errors.Is(err, io.ErrUnexpectedEOF):
- brokenName := f.Name() + ".broken"
- bf, bferr := os.Create(brokenName)
+ case io.ErrUnexpectedEOF:
+ bf, bferr := os.Create(f.Name() + ".broken")
if bferr != nil {
- lg.Warn("failed to create backup file", zap.String("path", brokenName), zap.Error(bferr))
+ lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr))
return false
}
defer bf.Close()
@@ -80,7 +76,7 @@ func Repair(lg *zap.Logger, dirpath string) bool {
}
if _, err = io.Copy(bf, f); err != nil {
- lg.Warn("failed to copy", zap.String("from", f.Name()), zap.String("to", brokenName), zap.Error(err))
+ lg.Warn("failed to copy", zap.String("from", f.Name()), zap.String("to", f.Name()+".broken"), zap.Error(err))
return false
}
@@ -89,13 +85,10 @@ func Repair(lg *zap.Logger, dirpath string) bool {
return false
}
- start := time.Now()
if err = fileutil.Fsync(f.File); err != nil {
lg.Warn("failed to fsync", zap.String("path", f.Name()), zap.Error(err))
return false
}
- walFsyncSec.Observe(time.Since(start).Seconds())
-
lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.ErrUnexpectedEOF))
return true
diff --git a/etcd/wal/walpb/over_self_serilize.go b/etcd/wal/walpb/over_self_serilize.go
new file mode 100644
index 00000000000..2ce5dbb938c
--- /dev/null
+++ b/etcd/wal/walpb/over_self_serilize.go
@@ -0,0 +1,39 @@
+package walpb
+
+import (
+ "encoding/json"
+)
+
+type Temp struct {
+ Type int64
+ Crc uint32
+ Data string
+}
+
+func (m *Record) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(Temp{
+ Type: m.Type,
+ Crc: m.Crc,
+ Data: string(m.Data),
+ })
+}
+
+func (m *Record) Unmarshal(dAtA []byte) error {
+ a := Temp{}
+ err := json.Unmarshal(dAtA, &a)
+ if err != nil {
+ return err
+ }
+ m.Type = a.Type
+ m.Crc = a.Crc
+ m.Data = []byte(a.Data)
+ return nil
+}
+
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ return json.Unmarshal(dAtA, m)
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ return json.Marshal(m)
+}
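Since this fork swaps walpb's protobuf encoding for JSON via the `Temp` struct above, a record round-trips as in this small sketch (not part of the patch). Note that `Data` is carried through a JSON string, so the scheme assumes the payload survives a []byte -> string -> []byte conversion; arbitrary non-UTF-8 bytes would not:

```go
package main

import (
	"fmt"

	"github.com/ls-2018/etcd_cn/etcd/wal/walpb"
)

func main() {
	in := walpb.Record{Type: 1, Crc: 0xdeadbeef, Data: []byte("payload")}

	b, err := in.Marshal() // {"Type":1,"Crc":3735928559,"Data":"payload"}
	if err != nil {
		panic(err)
	}

	var out walpb.Record
	if err := out.Unmarshal(b); err != nil {
		panic(err)
	}
	fmt.Println(out.Type, out.Crc, string(out.Data)) // 1 3735928559 payload
}
```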
diff --git a/etcd/wal/walpb/record.go b/etcd/wal/walpb/record.go
new file mode 100644
index 00000000000..47146a86601
--- /dev/null
+++ b/etcd/wal/walpb/record.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package walpb
+
+import "errors"
+
+var ErrCRCMismatch = errors.New("walpb: crc mismatch")
+
+func (rec *Record) Validate(crc uint32) error {
+ if rec.Crc == crc {
+ return nil
+ }
+ rec.Reset()
+ return ErrCRCMismatch
+}
diff --git a/etcd/wal/walpb/record.pb.go b/etcd/wal/walpb/record.pb.go
new file mode 100644
index 00000000000..e3eb7f0319c
--- /dev/null
+++ b/etcd/wal/walpb/record.pb.go
@@ -0,0 +1,95 @@
+// Code generated by protoc-gen-gogo.
+// source: record.proto
+
+package walpb
+
+import (
+ "encoding/json"
+ fmt "fmt"
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ raftpb "github.com/ls-2018/etcd_cn/raft/raftpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+
+var (
+ _ = fmt.Errorf
+ _ = math.Inf
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Record struct {
+ Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
+ Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *Record) Reset() { *m = Record{} }
+func (m *Record) String() string { return proto.CompactTextString(m) }
+func (*Record) ProtoMessage() {}
+func (*Record) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bf94fd919e302a1d, []int{0}
+}
+
+// Keep in sync with raftpb.SnapshotMetadata.
+type Snapshot struct {
+ Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
+ // Field populated since >=etcd-3.5.0.
+ ConfState *raftpb.ConfState `protobuf:"bytes,3,opt,name=conf_state,json=confState" json:"conf_state,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bf94fd919e302a1d, []int{1}
+}
+
+func init() {
+ proto.RegisterType((*Record)(nil), "walpb.Record")
+ proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
+}
+
+func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) }
+
+var fileDescriptor_bf94fd919e302a1d = []byte{
+ // 234 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8e, 0x41, 0x4e, 0xc3, 0x30,
+ 0x10, 0x45, 0x63, 0xe2, 0x22, 0x18, 0xca, 0x02, 0xab, 0xaa, 0xa2, 0x2c, 0x4c, 0xd4, 0x55, 0x56,
+ 0x29, 0xe2, 0x08, 0x65, 0xcf, 0x22, 0x3d, 0x00, 0x72, 0x1d, 0xa7, 0x20, 0xd1, 0x8c, 0x35, 0xb5,
+ 0x04, 0xdc, 0x84, 0x23, 0x65, 0xc9, 0x09, 0x10, 0x84, 0x8b, 0xa0, 0x8c, 0x03, 0x1b, 0xfb, 0xeb,
+ 0x7d, 0xf9, 0x7d, 0xc3, 0x9c, 0x9c, 0x45, 0x6a, 0x2a, 0x4f, 0x18, 0x50, 0xcd, 0x5e, 0xcc, 0xb3,
+ 0xdf, 0xe5, 0x8b, 0x3d, 0xee, 0x91, 0xc9, 0x7a, 0x4c, 0xb1, 0xcc, 0x97, 0x64, 0xda, 0xb0, 0x1e,
+ 0x0f, 0xbf, 0xe3, 0x2b, 0xf2, 0xd5, 0x3d, 0x9c, 0xd6, 0x2c, 0x51, 0x19, 0xc8, 0xf0, 0xe6, 0x5d,
+ 0x26, 0x0a, 0x51, 0xa6, 0x1b, 0xd9, 0x7f, 0x5e, 0x27, 0x35, 0x13, 0xb5, 0x84, 0xd4, 0x92, 0xcd,
+ 0x4e, 0x0a, 0x51, 0x5e, 0x4e, 0xc5, 0x08, 0x94, 0x02, 0xd9, 0x98, 0x60, 0xb2, 0xb4, 0x10, 0xe5,
+ 0xbc, 0xe6, 0xbc, 0x22, 0x38, 0xdb, 0x76, 0xc6, 0x1f, 0x1f, 0x31, 0xa8, 0x1c, 0x66, 0x4f, 0x5d,
+ 0xe3, 0x5e, 0x59, 0x29, 0xa7, 0x97, 0x11, 0xf1, 0x9a, 0xa3, 0x03, 0x4b, 0xe5, 0xff, 0x9a, 0xa3,
+ 0x83, 0xba, 0x01, 0xb0, 0xd8, 0xb5, 0x0f, 0xc7, 0x60, 0x82, 0x63, 0xf7, 0xc5, 0xed, 0x55, 0x15,
+ 0x7f, 0x5e, 0xdd, 0x61, 0xd7, 0x6e, 0xc7, 0xa2, 0x3e, 0xb7, 0x7f, 0x71, 0xb3, 0xe8, 0xbf, 0x75,
+ 0xd2, 0x0f, 0x5a, 0x7c, 0x0c, 0x5a, 0x7c, 0x0d, 0x5a, 0xbc, 0xff, 0xe8, 0xe4, 0x37, 0x00, 0x00,
+ 0xff, 0xff, 0xc3, 0x36, 0x0c, 0xad, 0x1d, 0x01, 0x00, 0x00,
+}
+
+func (m *Record) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
+
+func (m *Snapshot) Size() (n int) {
+ marshal, _ := json.Marshal(m)
+ return len(marshal)
+}
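One caveat about the helpers above (an observation on this fork, not upstream behavior): `Size()` runs `json.Marshal` on the struct itself, which uses the lowercase protobuf JSON tags and base64-encodes `Data`, while `Marshal()` goes through the `Temp` struct with capitalized field names and a plain string `Data`. The two encodings are not byte-for-byte identical, so `Size()` should not be assumed to equal `len(Marshal())`:

```go
package main

import (
	"fmt"

	"github.com/ls-2018/etcd_cn/etcd/wal/walpb"
)

func main() {
	rec := &walpb.Record{Type: 1, Crc: 7, Data: []byte("x")}
	b, _ := rec.Marshal() // Temp-based encoding: {"Type":1,"Crc":7,"Data":"x"}
	// Size() json-encodes the Record itself (tags "type"/"crc"/"data", Data base64-encoded),
	// so the two lengths come from different encodings and need not match.
	fmt.Println(rec.Size(), len(b))
}
```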
diff --git a/server/storage/wal/walpb/record.proto b/etcd/wal/walpb/record.proto
similarity index 95%
rename from server/storage/wal/walpb/record.proto
rename to etcd/wal/walpb/record.proto
index aed4351d315..536fa6c19c1 100644
--- a/server/storage/wal/walpb/record.proto
+++ b/etcd/wal/walpb/record.proto
@@ -2,7 +2,7 @@ syntax = "proto2";
package walpb;
import "gogoproto/gogo.proto";
-import "raftpb/raft.proto";
+import "raft/raftpb/raft.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
diff --git a/etcd3-multinode-systemd.md b/etcd3-multinode-systemd.md
new file mode 100644
index 00000000000..6e9f1de24cb
--- /dev/null
+++ b/etcd3-multinode-systemd.md
@@ -0,0 +1,173 @@
+# etcd3 multi-node cluster
+
+Here's how to deploy an etcd cluster with systemd.
+
+## Set up data directory
+
+etcd needs a data directory on the host machine. Make the data directory accessible to systemd as follows:
+
+```
+sudo mkdir -p /var/lib/etcd
+sudo chown -R root:$(whoami) /var/lib/etcd
+sudo chmod -R a+rw /var/lib/etcd
+```
+
+## Write systemd service file
+
+In each machine, write etcd systemd service files:
+
+```
+cat > /tmp/my-etcd-1.service < /tmp/my-etcd-2.service < /tmp/my-etcd-3.service < \
-PUT assigns the specified value with the specified key. If key already holds a value, it is overwritten.
-
-RPC: Put
-
#### Options
-- lease -- lease ID (in hexadecimal) to attach to the key.
-
-- prev-kv -- return the previous key-value pair before modification.
-
-- ignore-value -- updates the key using its current value.
-
-- ignore-lease -- updates the key using its current lease.
-
-#### Output
-
-`OK`
+- lease -- lease ID (in hexadecimal) to attach to the key.
+- prev-kv -- return the previous key-value pair before modification.
+- ignore-value -- update the key using its current value.
+- ignore-lease -- update the key using its current lease.
#### Examples
```bash
-./etcdctl put foo bar --lease=1234abcd
-# OK
-./etcdctl get foo
-# foo
-# bar
-./etcdctl put foo --ignore-value # to detache lease
-# OK
+leaseID=`echo $(etcdctl lease grant 5)|awk '{print $2}'`
+echo $leaseID
+etcdctl put foo bar --lease=$leaseID
+etcdctl get foo
+etcdctl put foo --ignore-value # detach the lease
+sleep 6
+etcdctl get foo
```
```bash
-./etcdctl put foo bar --lease=1234abcd
+leaseID=`echo $(etcdctl lease grant 5)|awk '{print $2}'`
+echo $leaseID
+etcdctl put foo bar --lease=$leaseID
# OK
-./etcdctl put foo bar1 --ignore-lease # to use existing lease 1234abcd
+etcdctl put foo bar1 --ignore-lease # to use existing lease 1234abcd
# OK
-./etcdctl get foo
+etcdctl get foo
# foo
# bar1
-```
-
-```bash
-./etcdctl put foo bar1 --prev-kv
+etcdctl put foo bar1 --prev-kv
# OK
# foo
# bar
-./etcdctl get foo
-# foo
# bar1
```
-#### Remarks
-
-If \ isn't given as command line argument, this command tries to read the value from standard input.
-
-When \ begins with '-', \ is interpreted as a flag.
-Insert '--' for workaround:
-
-```bash
-./etcdctl put --
-./etcdctl put --
```
-
-Providing \ in a new line after using `carriage return` is not supported and etcdctl may hang in that case. For example, following case is not supported:
-
-```bash
-./etcdctl put \r
-
-```
-
-A \ can have multiple lines or spaces but it must be provided with a double-quote as demonstrated below:
-
-```bash
-./etcdctl put foo "bar1 2 3"
+echo -e 'demo
+test' |etcdctl put asd --
+
+etcdctl put -- a b
```
### GET [options] \ [range_end]
-GET gets the key or a range of keys [key, range_end) if range_end is given.
+GET gets the key, or a range of keys [key, range_end) if range_end is given.
RPC: Range
@@ -134,20 +99,20 @@ RPC: Range
First, populate etcd with some keys:
```bash
-./etcdctl put foo bar
+etcdctl put foo bar
# OK
-./etcdctl put foo1 bar1
+etcdctl put foo1 bar1
# OK
-./etcdctl put foo2 bar2
+etcdctl put foo2 bar2
# OK
-./etcdctl put foo3 bar3
+etcdctl put foo3 bar3
# OK
```
Get the key named `foo`:
```bash
-./etcdctl get foo
+etcdctl get foo
# foo
# bar
```
@@ -155,7 +120,7 @@ Get the key named `foo`:
Get all keys:
```bash
-./etcdctl get --from-key ''
+etcdctl get --from-key ''
# foo
# bar
# foo1
@@ -169,7 +134,7 @@ Get all keys:
Get all keys with names greater than or equal to `foo1`:
```bash
-./etcdctl get --from-key foo1
+etcdctl get --from-key foo1
# foo1
# bar1
# foo2
@@ -181,7 +146,7 @@ Get all keys with names greater than or equal to `foo1`:
Get keys with names greater than or equal to `foo1` and less than `foo3`:
```bash
-./etcdctl get foo1 foo3
+etcdctl get foo1 foo3
# foo1
# bar1
# foo2
@@ -190,21 +155,22 @@ Get keys with names greater than or equal to `foo1` and less than `foo3`:
#### Remarks
-If any key or value contains non-printable characters or control characters, simple formatted output can be ambiguous due to new lines. To resolve this issue, set `--hex` to hex encode all strings.
+If any key or value contains non-printable characters or control characters, simple formatted output can be ambiguous
+due to new lines. To resolve this issue, set `--hex` to hex encode all strings.
### DEL [options] \ [range_end]
-Removes the specified key or range of keys [key, range_end) if range_end is given.
+Removes the specified key, or range of keys [key, range_end) if range_end is given.
RPC: DeleteRange
#### Options
-- prefix -- delete keys by matching prefix
+- prefix -- delete keys matching the prefix
-- prev-kv -- return deleted key-value pairs
+- prev-kv -- return the deleted key-value pairs
-- from-key -- delete keys that are greater than or equal to the given key using byte compare
+- from-key -- delete keys that are greater than or equal to the given key, using byte compare
#### Output
@@ -213,51 +179,52 @@ Prints the number of keys that were removed in decimal if DEL succeeded.
#### Examples
```bash
-./etcdctl put foo bar
+etcdctl put foo bar
# OK
-./etcdctl del foo
+etcdctl del foo
# 1
-./etcdctl get foo
+etcdctl get foo
```
```bash
-./etcdctl put key val
+etcdctl put key val
# OK
-./etcdctl del --prev-kv key
+etcdctl del --prev-kv key
# 1
# key
# val
-./etcdctl get key
+etcdctl get key
```
```bash
-./etcdctl put a 123
+etcdctl put a 123
# OK
-./etcdctl put b 456
+etcdctl put b 456
# OK
-./etcdctl put z 789
+etcdctl put z 789
# OK
-./etcdctl del --from-key a
+etcdctl del --from-key a
# 3
-./etcdctl get --from-key a
+etcdctl get --from-key a
```
```bash
-./etcdctl put zoo val
+etcdctl put zoo val
# OK
-./etcdctl put zoo1 val1
+etcdctl put zoo1 val1
# OK
-./etcdctl put zoo2 val2
+etcdctl put zoo2 val2
# OK
-./etcdctl del --prefix zoo
+etcdctl del --prefix zoo
# 3
-./etcdctl get zoo2
+etcdctl get zoo2
```
### TXN [options]
-TXN reads multiple etcd requests from standard input and applies them as a single atomic transaction.
-A transaction consists of list of conditions, a list of requests to apply if all the conditions are true, and a list of requests to apply if any condition is false.
+TXN reads multiple etcd requests from standard input and applies them as a single atomic transaction. A transaction
+consists of list of conditions, a list of requests to apply if all the conditions are true, and a list of requests to
+apply if any condition is false.
RPC: Txn
@@ -268,6 +235,7 @@ RPC: Txn
- interactive -- input transaction with interactive prompting.
#### Input Format
+
```ebnf
::= * "\n" "\n" "\n"
::= (||||) "\n"
@@ -275,7 +243,7 @@ RPC: Txn
:= ("c"|"create")"("")" ::= ("m"|"mod")"("")" ::= ("val"|"value")"("")"
- ::= ("ver"|"version")"("")"
+ ::= ("versionCount"|"version")"("")" ::= "lease("")" ::= *
::= *
@@ -289,13 +257,15 @@ RPC: Txn
#### Output
-`SUCCESS` if etcd processed the transaction success list, `FAILURE` if etcd processed the transaction failure list. Prints the output for each command in the executed request list, each separated by a blank line.
+`SUCCESS` if etcd processed the transaction success list, `FAILURE` if etcd processed the transaction failure list.
+Prints the output for each command in the executed request list, each separated by a blank line.
#### Examples
txn in interactive mode:
+
```bash
-./etcdctl txn -i
+etcdctl txn -i
# compares:
mod("key1") > "0"
@@ -314,8 +284,9 @@ put key2 "some extra key"
```
txn in non-interactive mode:
+
```bash
-./etcdctl txn <<<'mod("key1") > "0"
+etcdctl txn <<<'mod("key1") > "0"
put key1 "overwrote-key1"
@@ -333,10 +304,12 @@ put key2 "some extra key"
#### Remarks
-When using multi-line values within a TXN command, newlines must be represented as `\n`. Literal newlines will cause parsing failures. This differs from other commands (such as PUT) where the shell will convert literal newlines for us. For example:
+When using multi-line values within a TXN command, newlines must be represented as `\n`. Literal newlines will cause
+parsing failures. This differs from other commands (such as PUT) where the shell will convert literal newlines for us.
+For example:
```bash
-./etcdctl txn <<<'mod("key1") > "0"
+etcdctl txn <<<'mod("key1") > "0"
put key1 "overwrote-key1"
@@ -356,27 +329,30 @@ put key2 "this is\na multi-line\nvalue"
COMPACTION discards all etcd event history prior to a given revision. Since etcd uses a multiversion concurrency control
model, it preserves all key updates as event history. When the event history up to some revision is no longer needed,
-all superseded keys may be compacted away to reclaim storage space in the etcd backend database.
+all superseded keys may be compacted away to reclaim storage space in the etcd backend database.
RPC: Compact
#### Options
-- physical -- 'true' to wait for compaction to physically remove all old revisions
+- physical -- 'true' to wait for the compaction to physically remove all old revisions
#### Output
Prints the compacted revision.
#### Example
+
```bash
-./etcdctl compaction 1234
+etcdctl compaction 1234
# compacted revision 1234
```
### WATCH [options] [key or prefix] [range_end] [--] [exec-command arg1 arg2 ...]
-Watch watches events stream on keys or prefixes, [key or prefix, range_end) if range_end is given. The watch command runs until it encounters an error or is terminated by the user. If range_end is given, it must be lexicographically greater than key or "\x00".
+Watch watches events stream on keys or prefixes, [key or prefix, range_end) if range_end is given. The watch command
+runs until it encounters an error or is terminated by the user. If range_end is given, it must be lexicographically
+greater than key or "\x00".
RPC: Watch
@@ -409,14 +385,14 @@ watch [options] \n
##### Non-interactive
```bash
-./etcdctl watch foo
+etcdctl watch foo
# PUT
# foo
# bar
```
```bash
-ETCDCTL_WATCH_KEY=foo ./etcdctl watch
+ETCDCTL_WATCH_KEY=foo etcdctl watch
# PUT
# foo
# bar
@@ -425,7 +401,7 @@ ETCDCTL_WATCH_KEY=foo ./etcdctl watch
Receive events and execute `echo watch event received`:
```bash
-./etcdctl watch foo -- echo watch event received
+etcdctl watch foo -- echo watch event received
# PUT
# foo
# bar
@@ -435,7 +411,7 @@ Receive events and execute `echo watch event received`:
Watch response is set via `ETCD_WATCH_*` environmental variables:
```bash
-./etcdctl watch foo -- sh -c "env | grep ETCD_WATCH_"
+etcdctl watch foo -- sh -c "env | grep ETCD_WATCH_"
# PUT
# foo
@@ -450,7 +426,7 @@ Watch with environmental variables and execute `echo watch event received`:
```bash
export ETCDCTL_WATCH_KEY=foo
-./etcdctl watch -- echo watch event received
+etcdctl watch -- echo watch event received
# PUT
# foo
# bar
@@ -460,7 +436,7 @@ export ETCDCTL_WATCH_KEY=foo
```bash
export ETCDCTL_WATCH_KEY=foo
export ETCDCTL_WATCH_RANGE_END=foox
-./etcdctl watch -- echo watch event received
+etcdctl watch -- echo watch event received
# PUT
# fob
# bar
@@ -470,7 +446,7 @@ export ETCDCTL_WATCH_RANGE_END=foox
##### Interactive
```bash
-./etcdctl watch -i
+etcdctl watch -i
watch foo
watch foo
# PUT
@@ -484,7 +460,7 @@ watch foo
Receive events and execute `echo watch event received`:
```bash
-./etcdctl watch -i
+etcdctl watch -i
watch foo -- echo watch event received
# PUT
# foo
@@ -496,7 +472,7 @@ Watch with environmental variables and execute `echo watch event received`:
```bash
export ETCDCTL_WATCH_KEY=foo
-./etcdctl watch -i
+etcdctl watch -i
watch -- echo watch event received
# PUT
# foo
@@ -507,7 +483,7 @@ watch -- echo watch event received
```bash
export ETCDCTL_WATCH_KEY=foo
export ETCDCTL_WATCH_RANGE_END=foox
-./etcdctl watch -i
+etcdctl watch -i
watch -- echo watch event received
# PUT
# fob
@@ -521,8 +497,8 @@ LEASE provides commands for key lease management.
### LEASE GRANT \
-LEASE GRANT creates a fresh lease with a server-selected time-to-live in seconds
-greater than or equal to the requested TTL value.
+LEASE GRANT creates a fresh lease with a server-selected time-to-live in seconds greater than or equal to the requested
+TTL value.
RPC: LeaseGrant
@@ -533,7 +509,7 @@ Prints a message with the granted lease ID.
#### Example
```bash
-./etcdctl lease grant 60
+etcdctl lease grant 60
# lease 32695410dcc0ca06 granted with TTL(60s)
```
@@ -550,7 +526,7 @@ Prints a message indicating the lease is revoked.
#### Example
```bash
-./etcdctl lease revoke 32695410dcc0ca06
+etcdctl lease revoke 32695410dcc0ca06
# lease 32695410dcc0ca06 revoked
```
@@ -562,7 +538,7 @@ RPC: LeaseTimeToLive
#### Options
-- keys -- Get keys attached to this lease
+- keys -- get the keys attached to this lease
#### Output
@@ -571,28 +547,28 @@ Prints lease information.
#### Example
```bash
-./etcdctl lease grant 500
+etcdctl lease grant 500
# lease 2d8257079fa1bc0c granted with TTL(500s)
-./etcdctl put foo1 bar --lease=2d8257079fa1bc0c
+etcdctl put foo1 bar --lease=2d8257079fa1bc0c
# OK
-./etcdctl put foo2 bar --lease=2d8257079fa1bc0c
+etcdctl put foo2 bar --lease=2d8257079fa1bc0c
# OK
-./etcdctl lease timetolive 2d8257079fa1bc0c
+etcdctl lease timetolive 2d8257079fa1bc0c
# lease 2d8257079fa1bc0c granted with TTL(500s), remaining(481s)
-./etcdctl lease timetolive 2d8257079fa1bc0c --keys
+etcdctl lease timetolive 2d8257079fa1bc0c --keys
# lease 2d8257079fa1bc0c granted with TTL(500s), remaining(472s), attached keys([foo2 foo1])
-./etcdctl lease timetolive 2d8257079fa1bc0c --write-out=json
+etcdctl lease timetolive 2d8257079fa1bc0c --write-out=json
# {"cluster_id":17186838941855831277,"member_id":4845372305070271874,"revision":3,"raft_term":2,"id":3279279168933706764,"ttl":465,"granted-ttl":500,"keys":null}
-./etcdctl lease timetolive 2d8257079fa1bc0c --write-out=json --keys
+etcdctl lease timetolive 2d8257079fa1bc0c --write-out=json --keys
# {"cluster_id":17186838941855831277,"member_id":4845372305070271874,"revision":3,"raft_term":2,"id":3279279168933706764,"ttl":459,"granted-ttl":500,"keys":["Zm9vMQ==","Zm9vMg=="]}
-./etcdctl lease timetolive 2d8257079fa1bc0c
+etcdctl lease timetolive 2d8257079fa1bc0c
# lease 2d8257079fa1bc0c already expired
```
@@ -609,10 +585,10 @@ Prints a message with a list of active leases.
#### Example
```bash
-./etcdctl lease grant 60
+etcdctl lease grant 60
# lease 32695410dcc0ca06 granted with TTL(60s)
-./etcdctl lease list
+etcdctl lease list
32695410dcc0ca06
```
@@ -627,8 +603,9 @@ RPC: LeaseKeepAlive
Prints a message for every keep alive sent or prints a message indicating the lease is gone.
#### Example
+
```bash
-./etcdctl lease keep-alive 32695410dcc0ca0
+etcdctl lease keep-alive 32695410dcc0ca0
# lease 32695410dcc0ca0 keepalived with TTL(100)
# lease 32695410dcc0ca0 keepalived with TTL(100)
# lease 32695410dcc0ca0 keepalived with TTL(100)
@@ -658,7 +635,7 @@ Prints the member ID of the new member and the cluster ID.
#### Example
```bash
-./etcdctl member add newMember --peer-urls=https://127.0.0.1:12345
+etcdctl member add newMember --peer-urls=https://127.0.0.1:12345
Member ced000fda4d05edf added to cluster 8c4281cc65c7b112
@@ -684,7 +661,7 @@ Prints the member ID of the updated member and the cluster ID.
#### Example
```bash
-./etcdctl member update 2be1eb8f84b7f63e --peer-urls=https://127.0.0.1:11112
+etcdctl member update 2be1eb8f84b7f63e --peer-urls=https://127.0.0.1:11112
# Member 2be1eb8f84b7f63e updated in cluster ef37ad9dc622a7c4
```
@@ -701,7 +678,7 @@ Prints the member ID of the removed member and the cluster ID.
#### Example
```bash
-./etcdctl member remove 2be1eb8f84b7f63e
+etcdctl member remove 2be1eb8f84b7f63e
# Member 2be1eb8f84b7f63e removed from cluster ef37ad9dc622a7c4
```
@@ -718,19 +695,19 @@ Prints a humanized table of the member IDs, statuses, names, peer addresses, and
#### Examples
```bash
-./etcdctl member list
+etcdctl member list
# 8211f1d0f64f3269, started, infra1, http://127.0.0.1:12380, http://127.0.0.1:2379
# 91bc3c398fb3c146, started, infra2, http://127.0.0.1:22380, http://127.0.0.1:22379
# fd422379fda50e48, started, infra3, http://127.0.0.1:32380, http://127.0.0.1:32379
```
```bash
-./etcdctl -w json member list
+etcdctl -w json member list
# {"header":{"cluster_id":17237436991929493444,"member_id":9372538179322589801,"raft_term":2},"members":[{"ID":9372538179322589801,"name":"infra1","peerURLs":["http://127.0.0.1:12380"],"clientURLs":["http://127.0.0.1:2379"]},{"ID":10501334649042878790,"name":"infra2","peerURLs":["http://127.0.0.1:22380"],"clientURLs":["http://127.0.0.1:22379"]},{"ID":18249187646912138824,"name":"infra3","peerURLs":["http://127.0.0.1:32380"],"clientURLs":["http://127.0.0.1:32379"]}]}
```
```bash
-./etcdctl -w table member list
+etcdctl -w table member list
+------------------+---------+--------+------------------------+------------------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS |
+------------------+---------+--------+------------------------+------------------------+
@@ -750,26 +727,27 @@ ENDPOINT provides commands for querying individual endpoints.
### ENDPOINT HEALTH
-ENDPOINT HEALTH checks the health of the list of endpoints with respect to cluster. An endpoint is unhealthy
-when it cannot participate in consensus with the rest of the cluster.
+ENDPOINT HEALTH checks the health of the list of endpoints with respect to cluster. An endpoint is unhealthy when it
+cannot participate in consensus with the rest of the cluster.
#### Output
-If an endpoint can participate in consensus, prints a message indicating the endpoint is healthy. If an endpoint fails to participate in consensus, prints a message indicating the endpoint is unhealthy.
+If an endpoint can participate in consensus, prints a message indicating the endpoint is healthy. If an endpoint fails
+to participate in consensus, prints a message indicating the endpoint is unhealthy.
#### Example
Check the default endpoint's health:
```bash
-./etcdctl endpoint health
+etcdctl endpoint health
# 127.0.0.1:2379 is healthy: successfully committed proposal: took = 2.095242ms
```
Check all endpoints for the cluster associated with the default endpoint:
```bash
-./etcdctl endpoint --cluster health
+etcdctl endpoint --cluster health
# http://127.0.0.1:2379 is healthy: successfully committed proposal: took = 1.060091ms
# http://127.0.0.1:22379 is healthy: successfully committed proposal: took = 903.138µs
# http://127.0.0.1:32379 is healthy: successfully committed proposal: took = 1.113848ms
@@ -783,39 +761,41 @@ ENDPOINT STATUS queries the status of each endpoint in the given endpoint list.
##### Simple format
-Prints a humanized table of each endpoint URL, ID, version, database size, leadership status, raft term, and raft status.
+Prints a humanized table of each endpoint URL, ID, version, database size, leadership status, raft term, and raft
+status.
##### JSON format
-Prints a line of JSON encoding each endpoint URL, ID, version, database size, leadership status, raft term, and raft status.
+Prints a line of JSON encoding each endpoint URL, ID, version, database size, leadership status, raft term, and raft
+status.
#### Examples
Get the status for the default endpoint:
```bash
-./etcdctl endpoint status
+etcdctl endpoint status
# 127.0.0.1:2379, 8211f1d0f64f3269, 3.0.0, 25 kB, false, 2, 63
```
Get the status for the default endpoint as JSON:
```bash
-./etcdctl -w json endpoint status
+etcdctl -w json endpoint status
# [{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":17237436991929493444,"member_id":9372538179322589801,"revision":2,"raft_term":2},"version":"3.0.0","dbSize":24576,"leader":18249187646912138824,"raftIndex":32623,"raftTerm":2}}]
```
Get the status for all endpoints in the cluster associated with the default endpoint:
```bash
-./etcdctl -w table endpoint --cluster status
-+------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+
-| ENDPOINT | ID | VERSION | STORAGE VERSION | DB SIZE | DB SIZE IN USE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
-+------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+
-| http://127.0.0.1:2379 | 8211f1d0f64f3269 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | false | false | 2 | 8 | 8 | |
-| http://127.0.0.1:22379 | 91bc3c398fb3c146 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | true | false | 2 | 8 | 8 | |
-| http://127.0.0.1:32379 | fd422379fda50e48 | 3.6.0-alpha.0 | 3.6.0 | 25 kB | 25 kB | false | false | 2 | 8 | 8 | |
-+------------------------+------------------+---------------+-----------------+---------+----------------+-----------+------------+-----------+------------+--------------------+--------+
+etcdctl -w table endpoint --cluster status
++------------------------+------------------+----------------+---------+-----------+-----------+------------+
+| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
++------------------------+------------------+----------------+---------+-----------+-----------+------------+
+| http://127.0.0.1:2379 | 8211f1d0f64f3269 | 3.2.0-rc.1+git | 25 kB | false | 2 | 8 |
+| http://127.0.0.1:22379 | 91bc3c398fb3c146 | 3.2.0-rc.1+git | 25 kB | false | 2 | 8 |
+| http://127.0.0.1:32379 | fd422379fda50e48 | 3.2.0-rc.1+git | 25 kB | true | 2 | 8 |
++------------------------+------------------+----------------+---------+-----------+-----------+------------+
```
### ENDPOINT HASHKV
@@ -837,73 +817,28 @@ Prints a line of JSON encoding each endpoint URL and KV history hash.
Get the hash for the default endpoint:
```bash
-./etcdctl endpoint hashkv --cluster
-http://127.0.0.1:2379, 2064120424, 13
-http://127.0.0.1:22379, 2064120424, 13
-http://127.0.0.1:32379, 2064120424, 13
+etcdctl endpoint hashkv
+# 127.0.0.1:2379, 1084519789
```
Get the status for the default endpoint as JSON:
```bash
-./etcdctl endpoint hash --cluster -w json | jq
-[
- {
- "Endpoint": "http://127.0.0.1:2379",
- "HashKV": {
- "header": {
- "cluster_id": 17237436991929494000,
- "member_id": 9372538179322590000,
- "revision": 13,
- "raft_term": 2
- },
- "hash": 2064120424,
- "compact_revision": -1,
- "hash_revision": 13
- }
- },
- {
- "Endpoint": "http://127.0.0.1:22379",
- "HashKV": {
- "header": {
- "cluster_id": 17237436991929494000,
- "member_id": 10501334649042878000,
- "revision": 13,
- "raft_term": 2
- },
- "hash": 2064120424,
- "compact_revision": -1,
- "hash_revision": 13
- }
- },
- {
- "Endpoint": "http://127.0.0.1:32379",
- "HashKV": {
- "header": {
- "cluster_id": 17237436991929494000,
- "member_id": 18249187646912140000,
- "revision": 13,
- "raft_term": 2
- },
- "hash": 2064120424,
- "compact_revision": -1,
- "hash_revision": 13
- }
- }
-]
+etcdctl -w json endpoint hashkv
+# [{"Endpoint":"127.0.0.1:2379","Hash":{"header":{"cluster_id":14841639068965178418,"member_id":10276657743932975437,"revision":1,"raft_term":3},"hash":1084519789,"compact_revision":-1}}]
```
Get the status for all endpoints in the cluster associated with the default endpoint:
```bash
-$ ./etcdctl endpoint hash --cluster -w table
-+------------------------+-----------+---------------+
-| ENDPOINT | HASH | HASH REVISION |
-+------------------------+-----------+---------------+
-| http://127.0.0.1:2379 | 784522900 | 16 |
-| http://127.0.0.1:22379 | 784522900 | 16 |
-| http://127.0.0.1:32379 | 784522900 | 16 |
-+------------------------+-----------+---------------+
+etcdctl -w table endpoint --cluster hashkv
++------------------------+------------+
+| ENDPOINT | HASH |
++------------------------+------------+
+| http://127.0.0.1:2379 | 1084519789 |
+| http://127.0.0.1:22379 | 1084519789 |
+| http://127.0.0.1:32379 | 1084519789 |
++------------------------+------------+
```
### ALARM \
@@ -912,7 +847,7 @@ Provides alarm related commands
### ALARM DISARM
-`alarm disarm` Disarms all alarms
+`alarm disarm` disarms all alarms
RPC: Alarm
@@ -923,19 +858,19 @@ RPC: Alarm
#### Examples
```bash
-./etcdctl alarm disarm
+etcdctl alarm disarm
```
If NOSPACE alarm is present:
```bash
-./etcdctl alarm disarm
+etcdctl alarm disarm
# alarm:NOSPACE
```
### ALARM LIST
-`alarm list` lists all alarms.
+`alarm list` lists all alarms.
RPC: Alarm
@@ -946,26 +881,35 @@ RPC: Alarm
#### Examples
```bash
-./etcdctl alarm list
+etcdctl alarm list
```
If NOSPACE alarm is present:
```bash
-./etcdctl alarm list
+etcdctl alarm list
# alarm:NOSPACE
```
### DEFRAG [options]
-DEFRAG defragments the backend database file for a set of given endpoints while etcd is running. When an etcd member reclaims storage space from deleted and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the database, the etcd member releases this free space back to the file system.
+DEFRAG defragments the backend database file for a set of given endpoints while etcd is running, ~~or directly
+defragments an etcd data directory while etcd is not running~~. When an etcd member reclaims storage space from deleted
+and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the
+database, the etcd member releases this free space back to the file system.
**Note: to defragment offline (`--data-dir` flag), use: `etcutl defrag` instead**
-**Note that defragmentation to a live member blocks the system from reading and writing data while rebuilding its states.**
+**Note that defragmentation to a live member blocks the system from reading and writing data while rebuilding its
+states.**
+
+**Note that defragmentation request does not get replicated over cluster. That is, the request is only applied to the
+local node. Specify all members in `--endpoints` flag or `--cluster` flag to automatically find all cluster members.**
-**Note that defragmentation request does not get replicated over cluster. That is, the request is only applied to the local node. Specify all members in `--endpoints` flag or `--cluster` flag to automatically find all cluster members.**
+#### Options
+- data-dir -- Optional. **Deprecated**. If present, defragments a data directory not in use by etcd. To be removed in
+ v3.6.
#### Output
@@ -974,7 +918,7 @@ For each endpoints, prints a message indicating whether the endpoint was success
#### Example
```bash
-./etcdctl --endpoints=localhost:2379,badendpoint:2379 defrag
+etcdctl --endpoints=localhost:2379,badendpoint:2379 defrag
# Finished defragmenting etcd member[localhost:2379]
# Failed to defragment etcd member[badendpoint:2379] (grpc: timed out trying to connect)
```
@@ -982,12 +926,22 @@ For each endpoints, prints a message indicating whether the endpoint was success
Run defragment operations for all endpoints in the cluster associated with the default endpoint:
```bash
-./etcdctl defrag --cluster
+etcdctl defrag --cluster
Finished defragmenting etcd member[http://127.0.0.1:2379]
Finished defragmenting etcd member[http://127.0.0.1:22379]
Finished defragmenting etcd member[http://127.0.0.1:32379]
```
+To defragment a data directory directly, use the `etcdutl` with `--data-dir` flag
+(`etcdctl` will remove this flag in v3.6):
+
+``` bash
+# Defragment while etcd is not running
+etcdutl defrag --data-dir default.etcd
+# success (exit status 0)
+# Error: cannot open database at default.etcd/member/snap/db
+```
+
#### Remarks
DEFRAG returns a zero exit code only if it succeeded defragmenting all given endpoints.
@@ -1007,110 +961,125 @@ The backend snapshot is written to the given file path.
#### Example
Save a snapshot to "snapshot.db":
+
```
-./etcdctl snapshot save snapshot.db
+etcdctl snapshot save snapshot.db
```
### SNAPSHOT RESTORE [options] \
-Removed in v3.6. Use `etcdutl snapshot restore` instead.
+Note: Deprecated. Use `etcdutl snapshot restore` instead. To be removed in v3.6.
+SNAPSHOT RESTORE creates an etcd data directory for an etcd cluster member from a backend database snapshot and a new
+cluster configuration. Restoring the snapshot into each member for a new cluster configuration will initialize a new
+etcd cluster preloaded by the snapshot data.
-### SNAPSHOT STATUS \
+#### Options
-Removed in v3.6. Use `etcdutl snapshot status` instead.
+The snapshot restore options closely resemble those used in the `etcd` command for defining a cluster.
-### MOVE-LEADER \
+- data-dir -- Path to the data directory. Uses \.etcd if none given.
-MOVE-LEADER transfers leadership from the leader to another member in the cluster.
+- wal-dir -- Path to the WAL directory. Uses data directory if none given.
-#### Example
+- initial-cluster -- The initial cluster configuration for the restored etcd cluster.
-```bash
-# to choose transferee
-transferee_id=$(./etcdctl \
- --endpoints localhost:2379,localhost:22379,localhost:32379 \
- endpoint status | grep -m 1 "false" | awk -F', ' '{print $2}')
-echo ${transferee_id}
-# c89feb932daef420
+- initial-cluster-token -- Initial cluster token for the restored etcd cluster.
-# endpoints should include leader node
-./etcdctl --endpoints ${transferee_ep} move-leader ${transferee_id}
-# Error: no leader endpoint given at [localhost:22379 localhost:32379]
+- initial-advertise-peer-urls -- List of peer URLs for the member being restored.
-# request to leader with target node ID
-./etcdctl --endpoints ${leader_ep} move-leader ${transferee_id}
-# Leadership transferred from 45ddc0e800e20b93 to c89feb932daef420
-```
+- name -- Human-readable name for the etcd cluster member being restored.
-### DOWNGRADE \
+- skip-hash-check -- Ignore snapshot integrity hash value (required if copied from data directory)
-NOTICE: Downgrades is an experimental feature in v3.6 and is not recommended for production clusters.
+#### Output
-Downgrade provides commands to downgrade cluster.
-Normally etcd members cannot be downgraded due to cluster version mechanism.
+A new etcd data directory initialized with the snapshot.
-After initial bootstrap, cluster members agree on the cluster version. Every 5 seconds, leader checks versions of all members and picks lowers minor version.
-New members will refuse joining cluster with cluster version newer than theirs, thus preventing cluster from downgrading.
-Downgrade commands allow cluster administrator to force cluster version to be lowered to previous minor version, thus allowing to downgrade the cluster.
+#### Example
-Downgrade should be executed in stages:
-1. Verify that cluster is ready to be downgraded by running `etcdctl downgrade validate `
-2. Start the downgrade process by running `etcdctl downgrade enable `
-3. For each cluster member:
- 1. Ensure that member is ready for downgrade by confirming that it wrote `The server is ready to downgrade` log.
- 2. Replace member binary with one with older version.
- 3. Confirm that member has correctly started and joined the cluster.
-4. Ensure that downgrade process has succeeded by checking leader log for `the cluster has been downgraded`
+Save a snapshot, restore into a new 3 node cluster, and start the cluster:
-Downgrade can be canceled by running `etcdctl downgrade cancel` command.
+```
+etcdctl snapshot save snapshot.db
-In case of downgrade being canceled, cluster version will return to its normal behavior (pick the lowest member minor version).
-If no members were downgraded, cluster version will return to original value.
-If at least one member was downgraded, cluster version will stay at the `` until downgraded members are upgraded back.
+# restore members
+bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+bin/etcdctl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
-### DOWNGRADE VALIDATE \
+# launch members
+bin/etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 &
+bin/etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 &
+bin/etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 &
+```
-DOWNGRADE VALIDATE validate downgrade capability before starting downgrade.
+### SNAPSHOT STATUS \
-#### Example
+Note: Deprecated. Use `etcdutl snapshot status` instead. To be removed in v3.6.
-```bash
-./etcdctl downgrade validate 3.5
-Downgrade validate success, cluster version 3.6
+SNAPSHOT STATUS lists information about a given backend database snapshot file.
-./etcdctl downgrade validate 3.4
-Error: etcdserver: invalid downgrade target version
+#### Output
-```
+##### Simple format
-### DOWNGRADE ENABLE \
+Prints a humanized table of the database hash, revision, total keys, and size.
-DOWNGRADE ENABLE starts a downgrade action to cluster.
+##### JSON format
-#### Example
+Prints a line of JSON encoding the database hash, revision, total keys, and size.
+
+#### Examples
```bash
-./etcdctl downgrade enable 3.5
-Downgrade enable success, cluster version 3.6
+etcdctl snapshot status file.db
+# cf1550fb, 3, 3, 25 kB
```
-### DOWNGRADE CANCEL \
+```bash
+etcdctl --write-out=json snapshot status file.db
+# {"hash":3474280699,"revision":3,"totalKey":3,"totalSize":24576}
+```
+
+```bash
+etcdctl --write-out=table snapshot status file.db
++----------+----------+------------+------------+
+| HASH | REVISION | TOTAL KEYS | TOTAL SIZE |
++----------+----------+------------+------------+
+| cf1550fb | 3 | 3 | 25 kB |
++----------+----------+------------+------------+
+```
-DOWNGRADE CANCEL cancels the ongoing downgrade action to cluster.
+### MOVE-LEADER \
+
+MOVE-LEADER transfers leadership from the leader to another member in the cluster.
#### Example
```bash
-./etcdctl downgrade cancel
-Downgrade cancel success, cluster version 3.5
+# to choose transferee
+transferee_id=$(etcdctl \
+ --endpoints localhost:2379,localhost:22379,localhost:32379 \
+ endpoint status | grep -m 1 "false" | awk -F', ' '{print $2}')
+echo ${transferee_id}
+# c89feb932daef420
+
+# endpoints should include leader node
+etcdctl --endpoints ${transferee_ep} move-leader ${transferee_id}
+# Error: no leader endpoint given at [localhost:22379 localhost:32379]
+
+# request to leader with target node ID
+etcdctl --endpoints ${leader_ep} move-leader ${transferee_id}
+# Leadership transferred from 45ddc0e800e20b93 to c89feb932daef420
```
## Concurrency commands
### LOCK [options] \ [command arg1 arg2 ...]
-LOCK acquires a distributed mutex with a given name. Once the lock is acquired, it will be held until etcdctl is terminated.
+LOCK acquires a distributed mutex with a given name. Once the lock is acquired, it will be held until etcdctl is
+terminated.
#### Options
@@ -1120,27 +1089,29 @@ LOCK acquires a distributed mutex with a given name. Once the lock is acquired,
Once the lock is acquired but no command is given, the result for the GET on the unique lock holder key is displayed.
-If a command is given, it will be executed with environment variables `ETCD_LOCK_KEY` and `ETCD_LOCK_REV` set to the lock's holder key and revision.
+If a command is given, it will be executed with environment variables `ETCD_LOCK_KEY` and `ETCD_LOCK_REV` set to the
+lock's holder key and revision.
#### Example
Acquire lock with standard output display:
```bash
-./etcdctl lock mylock
+etcdctl lock mylock
# mylock/1234534535445
```
Acquire lock and execute `echo lock acquired`:
```bash
-./etcdctl lock mylock echo lock acquired
+etcdctl lock mylock echo lock acquired
# lock acquired
```
Acquire lock and execute `etcdctl put` command
+
```bash
-./etcdctl lock mylock ./etcdctl put foo bar
+etcdctl lock mylock etcdctl put foo bar
# OK
```
@@ -1148,13 +1119,14 @@ Acquire lock and execute `etcdctl put` command
LOCK returns a zero exit code only if it is terminated by a signal and releases the lock.
-If LOCK is abnormally terminated or fails to contact the cluster to release the lock, the lock will remain held until the lease expires. Progress may be delayed by up to the default lease length of 60 seconds.
+If LOCK is abnormally terminated or fails to contact the cluster to release the lock, the lock will remain held until
+the lease expires. Progress may be delayed by up to the default lease length of 60 seconds.
### ELECT [options] \ [proposal]
-ELECT participates on a named election. A node announces its candidacy in the election by providing
-a proposal value. If a node wishes to observe the election, ELECT listens for new leaders values.
-Whenever a leader is elected, its proposal is given as output.
+ELECT participates in a named election. A node announces its candidacy in the election by providing a proposal value. If
+a node wishes to observe the election, ELECT listens for new leader values. Whenever a leader is elected, its proposal
+is given as output.
#### Options
@@ -1169,7 +1141,7 @@ Whenever a leader is elected, its proposal is given as output.
#### Example
```bash
-./etcdctl elect myelection foo
+etcdctl elect myelection foo
# myelection/1456952310051373265
# foo
```
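To observe the same election instead of campaigning, the `-l`/`--listen` flag can be used; a sketch (the printed key and proposal depend on the current leader):

```bash
etcdctl elect --listen myelection
# myelection/1456952310051373265
# foo
```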
@@ -1178,13 +1150,15 @@ Whenever a leader is elected, its proposal is given as output.
ELECT returns a zero exit code only if it is terminated by a signal and can revoke its candidacy or leadership, if any.
-If a candidate is abnormally terminated, election progress may be delayed by up to the default lease length of 60 seconds.
+If a candidate is abnormally terminated, election progress may be delayed by up to the default lease length of 60
+seconds.
## Authentication commands
### AUTH \<enable or disable\>
-`auth enable` activates authentication on an etcd cluster and `auth disable` deactivates. When authentication is enabled, etcd checks all requests for appropriate authorization.
+`auth enable` activates authentication on an etcd cluster and `auth disable` deactivates it. When authentication is
+enabled, etcd checks all requests for appropriate authorization.
RPC: AuthEnable/AuthDisable
@@ -1195,28 +1169,28 @@ RPC: AuthEnable/AuthDisable
#### Examples
```bash
-./etcdctl user add root
+etcdctl user add root
# Password of root:#type password for root
# Type password of root again for confirmation:#re-type password for root
# User root created
-./etcdctl user grant-role root root
+etcdctl user grant-role root root
# Role root is granted to user root
-./etcdctl user get root
+etcdctl user get root
# User: root
# Roles: root
-./etcdctl role add root
+etcdctl role add root
# Role root created
-./etcdctl role get root
+etcdctl role get root
# Role root
# KV Read:
# KV Write:
-./etcdctl auth enable
+etcdctl auth enable
# Authentication Enabled
```
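Authentication can later be switched off again by the root user; a minimal sketch:

```bash
etcdctl --user=root:123 auth disable
# Authentication Disabled
```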
### ROLE \<subcommand\>
-ROLE is used to specify different roles which can be assigned to etcd user(s).
+ROLE is used to specify different roles which can be assigned to etcd user(s).
### ROLE ADD \<role name\>
@@ -1231,7 +1205,7 @@ RPC: RoleAdd
#### Examples
```bash
-./etcdctl --user=root:123 role add myrole
+etcdctl --user=root:123 role add myrole
# Role myrole created
```
@@ -1248,7 +1222,7 @@ Detailed role information.
#### Examples
```bash
-./etcdctl --user=root:123 role get myrole
+etcdctl --user=root:123 role get myrole
# Role myrole
# KV Read:
# foo
@@ -1269,7 +1243,7 @@ RPC: RoleDelete
#### Examples
```bash
-./etcdctl --user=root:123 role delete myrole
+etcdctl --user=root:123 role delete myrole
# Role myrole deleted
```
@@ -1286,7 +1260,7 @@ A role per line.
#### Examples
```bash
-./etcdctl --user=root:123 role list
+etcdctl --user=root:123 role list
# roleA
# roleB
# myrole
@@ -1313,14 +1287,14 @@ RPC: RoleGrantPermission
Grant read and write permission on the key `foo` to role `myrole`:
```bash
-./etcdctl --user=root:123 role grant-permission myrole readwrite foo
+etcdctl --user=root:123 role grant-permission myrole readwrite foo
# Role myrole updated
```
Grant read permission on the wildcard key pattern `foo/*` to role `myrole`:
```bash
-./etcdctl --user=root:123 role grant-permission --prefix myrole readwrite foo/
+etcdctl --user=root:123 role grant-permission --prefix myrole readwrite foo/
# Role myrole updated
```
@@ -1338,12 +1312,13 @@ RPC: RoleRevokePermission
#### Output
-`Permission of key is revoked from role ` for single key. `Permission of range [, ) is revoked from role ` for a key range. Exit code is zero.
+`Permission of key <key> is revoked from role <role name>` for a single
+key. `Permission of range [<key>, <range_end>) is revoked from role <role name>` for a key range. Exit code is zero.
#### Examples
```bash
-./etcdctl --user=root:123 role revoke-permission myrole foo
+etcdctl --user=root:123 role revoke-permission myrole foo
# Permission of key foo is revoked from role myrole
```
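Revoking a permission that was granted on a key range follows the same form; a sketch with an illustrative range:

```bash
etcdctl --user=root:123 role revoke-permission myrole key1 key5
# Permission of range [key1, key5) is revoked from role myrole
```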
@@ -1368,7 +1343,7 @@ RPC: UserAdd
#### Examples
```bash
-./etcdctl --user=root:123 user add myuser
+etcdctl --user=root:123 user add myuser
# Password of myuser: #type password for my user
# Type password of myuser again for confirmation:#re-type password for my user
# User myuser created
@@ -1391,7 +1366,7 @@ Detailed user information.
#### Examples
```bash
-./etcdctl --user=root:123 user get myuser
+etcdctl --user=root:123 user get myuser
# User: myuser
# Roles:
```
@@ -1409,7 +1384,7 @@ RPC: UserDelete
#### Examples
```bash
-./etcdctl --user=root:123 user delete myuser
+etcdctl --user=root:123 user delete myuser
# User myuser deleted
```
@@ -1426,7 +1401,7 @@ RPC: UserList
#### Examples
```bash
-./etcdctl --user=root:123 user list
+etcdctl --user=root:123 user list
# user1
# user2
# myuser
@@ -1449,7 +1424,7 @@ RPC: UserChangePassword
#### Examples
```bash
-./etcdctl --user=root:123 user passwd myuser
+etcdctl --user=root:123 user passwd myuser
# Password of myuser: #type new password for my user
# Type password of myuser again for confirmation: #re-type the new password for my user
# Password updated
@@ -1468,7 +1443,7 @@ RPC: UserGrantRole
#### Examples
```bash
-./etcdctl --user=root:123 user grant-role userA roleA
+etcdctl --user=root:123 user grant-role userA roleA
# Role roleA is granted to user userA
```
@@ -1485,7 +1460,7 @@ RPC: UserRevokeRole
#### Examples
```bash
-./etcdctl --user=root:123 user revoke-role userA roleA
+etcdctl --user=root:123 user revoke-role userA roleA
# Role roleA is revoked from user userA
```
@@ -1511,8 +1486,6 @@ RPC: UserRevokeRole
- dest-insecure-transport -- Disable transport security for client connections
-- max-txn-ops -- Maximum number of operations permitted in a transaction during syncing updates
-
#### Output
The approximate total number of keys transferred to the destination cluster, updated every 30 seconds.
@@ -1520,13 +1493,12 @@ The approximate total number of keys transferred to the destination cluster, upd
#### Examples
```
-./etcdctl make-mirror mirror.example.com:2379
+etcdctl make-mirror mirror.example.com:2379
# 10
# 18
```
-[mirror]: ./doc/mirror_maker.md
-
+[mirror]: doc/mirror_maker.md
### VERSION
@@ -1539,7 +1511,7 @@ Prints etcd version and API version.
#### Examples
```bash
-./etcdctl version
+etcdctl version
# etcdctl version: 3.1.0-alpha.0+git
# API version: 3.1
```
@@ -1550,27 +1522,9 @@ CHECK provides commands for checking properties of the etcd cluster.
### CHECK PERF [options]
-CHECK PERF checks the performance of the etcd cluster for 60 seconds. Running the `check perf` often can create a large keyspace history which can be auto compacted and defragmented using the `--auto-compact` and `--auto-defrag` options as described below.
-
-Notice that different workload models use different configurations in terms of number of clients and throughtput. Here is the configuration for each load:
-
-
-| Load | Number of clients | Number of put requests (requests/sec) |
-|---------|------|---------|
-| Small | 50 | 10000 |
-| Medium | 200 | 100000 |
-| Large | 500 | 1000000 |
-| xLarge | 1000 | 3000000 |
-
-The test checks for the following conditions:
-
-- The throughput should be at least 90% of the issued requets
-- All the requests should be done in less than 500 ms
-- The standard deviation of the requests should be less than 100 ms
-
-
-Hence, a workload model may work while another one might fail.
-
+CHECK PERF checks the performance of the etcd cluster for 60 seconds. Running `check perf` often can create a large
+keyspace history, which can be auto-compacted and defragmented using the `--auto-compact` and `--auto-defrag` options as
+described below.
RPC: CheckPerf
@@ -1586,20 +1540,22 @@ RPC: CheckPerf
#### Output
-Prints the result of performance check on different criteria like throughput. Also prints an overall status of the check as pass or fail.
+Prints the result of the performance check on different criteria, such as throughput. Also prints an overall status of the
+check as pass or fail.
#### Examples
-Shows examples of both, pass and fail, status. The failure is due to the fact that a large workload was tried on a single node etcd cluster running on a laptop environment created for development and testing purpose.
+Shows examples of both pass and fail statuses. The failure is due to a large workload being tried on a
+single-node etcd cluster running in a laptop environment created for development and testing purposes.
```bash
-./etcdctl check perf --load="s"
+etcdctl check perf --load="s"
# 60 / 60 Booooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo! 100.00%1m0s
# PASS: Throughput is 150 writes/s
# PASS: Slowest request took 0.087509s
# PASS: Stddev is 0.011084s
# PASS
-./etcdctl check perf --load="l"
+etcdctl check perf --load="l"
# 60 / 60 Booooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo! 100.00%1m0s
# FAIL: Throughput too low: 6808 writes/s
# PASS: Slowest request took 0.228191s
@@ -1609,7 +1565,9 @@ Shows examples of both, pass and fail, status. The failure is due to the fact th
### CHECK DATASCALE [options]
-CHECK DATASCALE checks the memory usage of holding data for different workloads on a given server endpoint. Running the `check datascale` often can create a large keyspace history which can be auto compacted and defragmented using the `--auto-compact` and `--auto-defrag` options as described below.
+CHECK DATASCALE checks the memory usage of holding data for different workloads on a given server endpoint. Running
+`check datascale` often can create a large keyspace history, which can be auto-compacted and defragmented using
+the `--auto-compact` and `--auto-defrag` options as described below.
RPC: CheckDatascale
@@ -1625,12 +1583,13 @@ RPC: CheckDatascale
#### Output
-Prints the system memory usage for a given workload. Also prints status of compact and defragment if related options are passed.
+Prints the system memory usage for a given workload. Also prints the status of compaction and defragmentation if the
+related options are passed.
#### Examples
```bash
-./etcdctl check datascale --load="s" --auto-compact=true --auto-defrag=true
+etcdctl check datascale --load="s" --auto-compact=true --auto-defrag=true
# Start data scale check for work load [10000 key-value pairs, 1024 bytes per key-value, 50 concurrent clients].
# Compacting with revision 18346204
# Compacted with revision 18346204
@@ -1645,47 +1604,50 @@ For all commands, a successful execution return a zero exit code. All failures w
## Output formats
-All commands accept an output format by setting `-w` or `--write-out`. All commands default to the "simple" output format, which is meant to be human-readable. The simple format is listed in each command's `Output` description since it is customized for each command. If a command has a corresponding RPC, it will respect all output formats.
+All commands accept an output format by setting `-w` or `--write-out`. All commands default to the "simple" output
+format, which is meant to be human-readable. The simple format is listed in each command's `Output` description since it
+is customized for each command. If a command has a corresponding RPC, it will respect all output formats.
-If a command fails, returning a non-zero exit code, an error string will be written to standard error regardless of output format.
+If a command fails, returning a non-zero exit code, an error string will be written to standard error regardless of
+output format.
### Simple
-A format meant to be easy to parse and human-readable. Specific to each command.
+A format meant to be easy to parse and human-readable. Specific to each command.
### JSON
-The JSON encoding of the command's [RPC response][etcdrpc]. Since etcd's RPCs use byte strings, the JSON output will encode keys and values in base64.
+The JSON encoding of the command's [RPC response][etcdrpc]. Since etcd's RPCs use byte strings, the JSON output will
+encode keys and values in base64.
Some commands without an RPC also support JSON; see the command's `Output` description.
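For example, a key `foo` with value `bar` appears base64-encoded (`Zm9v` and `YmFy`) in the JSON output; a sketch with the header fields abbreviated:

```bash
etcdctl --write-out=json get foo
# {"header":{...},"kvs":[{"key":"Zm9v","create_revision":2,"mod_revision":2,"version":1,"value":"YmFy"}],"count":1}
```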
### Protobuf
-The protobuf encoding of the command's [RPC response][etcdrpc]. If an RPC is streaming, the stream messages will be concetenated. If an RPC is not given for a command, the protobuf output is not defined.
+The protobuf encoding of the command's [RPC response][etcdrpc]. If an RPC is streaming, the stream messages will be
+concatenated. If an RPC is not given for a command, the protobuf output is not defined.
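Because the protobuf output is binary, it is usually redirected to a file for later decoding; a minimal sketch:

```bash
etcdctl --write-out=protobuf get foo > get_foo.pb
```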
### Fields
-An output format similar to JSON but meant to parse with coreutils. For an integer field named `Field`, it writes a line in the format `"Field" : %d` where `%d` is go's integer formatting. For byte array fields, it writes `"Field" : %q` where `%q` is go's quoted string formatting (e.g., `[]byte{'a', '\n'}` is written as `"a\n"`).
+An output format similar to JSON but meant to be parsed with coreutils. For an integer field named `Field`, it writes a line
+in the format `"Field" : %d` where `%d` is Go's integer formatting. For byte array fields, it writes `"Field" : %q`
+where `%q` is Go's quoted string formatting (e.g., `[]byte{'a', '\n'}` is written as `"a\n"`).
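A sketch of the fields output for a simple get (the IDs, revisions, and raft term shown are illustrative):

```bash
etcdctl --write-out=fields get foo
# "ClusterID" : 14841639068965178418
# "MemberID" : 10276657743932975437
# "Revision" : 3
# "RaftTerm" : 2
# "Key" : "foo"
# "CreateRevision" : 2
# "ModRevision" : 3
# "Version" : 2
# "Value" : "bar"
# "Lease" : 0
# "More" : false
# "Count" : 1
```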
## Compatibility Support
-etcdctl is still in its early stage. We try out best to ensure fully compatible releases, however we might break compatibility to fix bugs or improve commands. If we intend to release a version of etcdctl with backward incompatibilities, we will provide notice prior to release and have instructions on how to upgrade.
+etcdctl is still in its early stage. We try our best to ensure fully compatible releases; however, we might break
+compatibility to fix bugs or improve commands. If we intend to release a version of etcdctl with backward
+incompatibilities, we will provide notice prior to release and have instructions on how to upgrade.
### Input Compatibility
-Input includes the command name, its flags, and its arguments. We ensure backward compatibility of the input of normal commands in non-interactive mode.
+Input includes the command name, its flags, and its arguments. We ensure backward compatibility of the input of normal
+commands in non-interactive mode.
### Output Compatibility
-Output includes output from etcdctl and its exit code. etcdctl provides `simple` output format by default.
-We ensure compatibility for the `simple` output format of normal commands in non-interactive mode. Currently, we do not ensure
-backward compatibility for `JSON` format and the format in non-interactive mode. Currently, we do not ensure backward compatibility of utility commands.
-
-### TODO: compatibility with etcd server
+Output includes output from etcdctl and its exit code. etcdctl provides the `simple` output format by default. We ensure
+compatibility for the `simple` output format of normal commands in non-interactive mode. Currently, we do not ensure
+backward compatibility for the `JSON` format or the format in non-interactive mode, nor do we ensure backward
+compatibility of utility commands.
-[etcd]: https://github.com/coreos/etcd
-[READMEv2]: READMEv2.md
-[v2key]: ../store/node_extern.go#L28-L37
-[v3key]: ../api/mvccpb/kv.proto#L12-L29
-[etcdrpc]: ../api/etcdserverpb/rpc.proto
-[storagerpc]: ../api/mvccpb/kv.proto
diff --git a/etcdctl/READMEv2.md b/etcdctl/READMEv2.md
deleted file mode 100644
index 8c7fc1e564b..00000000000
--- a/etcdctl/READMEv2.md
+++ /dev/null
@@ -1,336 +0,0 @@
-etcdctl
-========
-
-`etcdctl` is a command line client for [etcd][etcd].
-It can be used in scripts or for administrators to explore an etcd cluster.
-
-## Getting etcdctl
-
-The latest release is available as a binary at [Github][github-release] along with etcd.
-
-etcdctl can also be built from source using the build script found in the parent directory.
-
-## Configuration
-### --debug
-+ output cURL commands which can be used to reproduce the request
-
-### --no-sync
-+ don't synchronize cluster information before sending request
-+ Use this to access non-published client endpoints
-+ Without this flag, values from `--endpoint` flag will be overwritten by etcd cluster when it does internal sync.
-
-### --output, -o
-+ output response in the given format (`simple`, `extended` or `json`)
-+ default: `"simple"`
-
-### --discovery-srv, -D
-+ domain name to query for SRV records describing cluster endpoints
-+ default: none
-+ env variable: ETCDCTL_DISCOVERY_SRV
-
-### --peers
-+ a comma-delimited list of machine addresses in the cluster
-+ default: `"http://127.0.0.1:2379"`
-+ env variable: ETCDCTL_PEERS
-
-### --endpoint
-+ a comma-delimited list of machine addresses in the cluster
-+ default: `"http://127.0.0.1:2379"`
-+ env variable: ETCDCTL_ENDPOINT
-+ Without `--no-sync` flag, this will be overwritten by etcd cluster when it does internal sync.
-
-### --cert-file
-+ identify HTTPS client using this SSL certificate file
-+ default: none
-+ env variable: ETCDCTL_CERT_FILE
-
-### --key-file
-+ identify HTTPS client using this SSL key file
-+ default: none
-+ env variable: ETCDCTL_KEY_FILE
-
-### --ca-file
-+ verify certificates of HTTPS-enabled servers using this CA bundle
-+ default: none
-+ env variable: ETCDCTL_CA_FILE
-
-### --username, -u
-+ provide username[:password] and prompt if password is not supplied
-+ default: none
-+ env variable: ETCDCTL_USERNAME
-
-### --timeout
-+ connection timeout per request
-+ default: `"1s"`
-
-### --total-timeout
-+ timeout for the command execution (except watch)
-+ default: `"5s"`
-
-## Usage
-
-### Setting Key Values
-
-Set a value on the `/foo/bar` key:
-
-```sh
-$ etcdctl set /foo/bar "Hello world"
-Hello world
-```
-
-Set a value on the `/foo/bar` key with a value that expires in 60 seconds:
-
-```sh
-$ etcdctl set /foo/bar "Hello world" --ttl 60
-Hello world
-```
-
-Conditionally set a value on `/foo/bar` if the previous value was "Hello world":
-
-```sh
-$ etcdctl set /foo/bar "Goodbye world" --swap-with-value "Hello world"
-Goodbye world
-```
-
-Conditionally set a value on `/foo/bar` if the previous etcd index was 12:
-
-```sh
-$ etcdctl set /foo/bar "Goodbye world" --swap-with-index 12
-Goodbye world
-```
-
-Create a new key `/foo/bar`, only if the key did not previously exist:
-
-```sh
-$ etcdctl mk /foo/new_bar "Hello world"
-Hello world
-```
-
-Create a new in-order key under dir `/fooDir`:
-
-```sh
-$ etcdctl mk --in-order /fooDir "Hello world"
-```
-
-Create a new dir `/fooDir`, only if the key did not previously exist:
-
-```sh
-$ etcdctl mkdir /fooDir
-```
-
-Update an existing key `/foo/bar`, only if the key already existed:
-
-```sh
-$ etcdctl update /foo/bar "Hola mundo"
-Hola mundo
-```
-
-Create or update a directory called `/mydir`:
-
-```sh
-$ etcdctl setdir /mydir
-```
-
-
-### Retrieving a key value
-
-Get the current value for a single key in the local etcd node:
-
-```sh
-$ etcdctl get /foo/bar
-Hello world
-```
-
-Get the value of a key with additional metadata in a parseable format:
-
-```sh
-$ etcdctl -o extended get /foo/bar
-Key: /foo/bar
-Modified-Index: 72
-TTL: 0
-Etcd-Index: 72
-Raft-Index: 5611
-Raft-Term: 1
-
-Hello World
-```
-
-### Listing a directory
-
-Explore the keyspace using the `ls` command
-
-```sh
-$ etcdctl ls
-/akey
-/adir
-$ etcdctl ls /adir
-/adir/key1
-/adir/key2
-```
-
-Add `--recursive` to recursively list subdirectories encountered.
-
-```sh
-$ etcdctl ls --recursive
-/akey
-/adir
-/adir/key1
-/adir/key2
-```
-
-Directories can also have a trailing `/` added to output using `-p`.
-
-```sh
-$ etcdctl ls -p
-/akey
-/adir/
-```
-
-### Deleting a key
-
-Delete a key:
-
-```sh
-$ etcdctl rm /foo/bar
-```
-
-Delete an empty directory or a key-value pair
-
-```sh
-$ etcdctl rmdir /path/to/dir
-```
-
-or
-
-```sh
-$ etcdctl rm /path/to/dir --dir
-```
-
-Recursively delete a key and all child keys:
-
-```sh
-$ etcdctl rm /path/to/dir --recursive
-```
-
-Conditionally delete `/foo/bar` if the previous value was "Hello world":
-
-```sh
-$ etcdctl rm /foo/bar --with-value "Hello world"
-```
-
-Conditionally delete `/foo/bar` if the previous etcd index was 12:
-
-```sh
-$ etcdctl rm /foo/bar --with-index 12
-```
-
-### Watching for changes
-
-Watch for only the next change on a key:
-
-```sh
-$ etcdctl watch /foo/bar
-Hello world
-```
-
-Continuously watch a key:
-
-```sh
-$ etcdctl watch /foo/bar --forever
-Hello world
-.... client hangs forever until ctrl+C printing values as key change
-```
-
-Continuously watch a key, starting with a given etcd index:
-
-```sh
-$ etcdctl watch /foo/bar --forever --index 12
-Hello world
-.... client hangs forever until ctrl+C printing values as key change
-```
-
-Continuously watch a key and exec a program:
-
-```sh
-$ etcdctl exec-watch /foo/bar -- sh -c "env | grep ETCD"
-ETCD_WATCH_ACTION=set
-ETCD_WATCH_VALUE=My configuration stuff
-ETCD_WATCH_MODIFIED_INDEX=1999
-ETCD_WATCH_KEY=/foo/bar
-ETCD_WATCH_ACTION=set
-ETCD_WATCH_VALUE=My new configuration stuff
-ETCD_WATCH_MODIFIED_INDEX=2000
-ETCD_WATCH_KEY=/foo/bar
-```
-
-Continuously and recursively watch a key and exec a program:
-```sh
-$ etcdctl exec-watch --recursive /foo -- sh -c "env | grep ETCD"
-ETCD_WATCH_ACTION=set
-ETCD_WATCH_VALUE=My configuration stuff
-ETCD_WATCH_MODIFIED_INDEX=1999
-ETCD_WATCH_KEY=/foo/bar
-ETCD_WATCH_ACTION=set
-ETCD_WATCH_VALUE=My new configuration stuff
-ETCD_WATCH_MODIFIED_INDEX=2000
-ETCD_WATCH_KEY=/foo/barbar
-```
-
-## Return Codes
-
-The following exit codes can be returned from etcdctl:
-
-```
-0 Success
-1 Malformed etcdctl arguments
-2 Failed to connect to host
-3 Failed to auth (client cert rejected, ca validation failure, etc)
-4 400 error from etcd
-5 500 error from etcd
-```
-
-## Endpoint
-
-If the etcd cluster isn't available on `http://127.0.0.1:2379`, specify a `--endpoint` flag or `ETCDCTL_ENDPOINT` environment variable. One endpoint or a comma-separated list of endpoints can be listed. This option is ignored if the `--discovery-srv` option is provided.
-
-```sh
-ETCDCTL_ENDPOINT="http://10.0.28.1:4002" etcdctl set my-key to-a-value
-ETCDCTL_ENDPOINT="http://10.0.28.1:4002,http://10.0.28.2:4002,http://10.0.28.3:4002" etcdctl set my-key to-a-value
-etcdctl --endpoint http://10.0.28.1:4002 my-key to-a-value
-etcdctl --endpoint http://10.0.28.1:4002,http://10.0.28.2:4002,http://10.0.28.3:4002 etcdctl set my-key to-a-value
-```
-
-## Username and Password
-
-If the etcd cluster is protected by [authentication][authentication], specify username and password using the [`--username`][username-flag] or `ETCDCTL_USERNAME` environment variable. When `--username` flag or `ETCDCTL_USERNAME` environment variable doesn't contain password, etcdctl will prompt password in interactive mode.
-
-```sh
-ETCDCTL_USERNAME="root:password" etcdctl set my-key to-a-value
-```
-
-## DNS Discovery
-
-To discover the etcd cluster through domain SRV records, specify a `--discovery-srv` flag or `ETCDCTL_DISCOVERY_SRV` environment variable. This option takes precedence over the `--endpoint` flag.
-
-```sh
-ETCDCTL_DISCOVERY_SRV="some-domain" etcdctl set my-key to-a-value
-etcdctl --discovery-srv some-domain set my-key to-a-value
-```
-
-## Project Details
-
-### Versioning
-
-etcdctl uses [semantic versioning][semver].
-Releases will follow lockstep with the etcd release cycle.
-
-### License
-
-etcdctl is under the Apache 2.0 license. See the [LICENSE][license] file for details.
-
-[authentication]: https://github.com/etcd-io/website/blob/main/content/docs/v2/authentication.md
-[etcd]: https://github.com/coreos/etcd
-[github-release]: https://github.com/coreos/etcd/releases/
-[license]: ../LICENSE
-[semver]: http://semver.org/
-[username-flag]: #--username--u
diff --git a/etcdctl/ctlv3/command/alarm_command.go b/etcdctl/ctlv3/command/alarm_command.go
deleted file mode 100644
index 679f9d98f27..00000000000
--- a/etcdctl/ctlv3/command/alarm_command.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
-
- "github.com/spf13/cobra"
-
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewAlarmCommand returns the cobra command for "alarm".
-func NewAlarmCommand() *cobra.Command {
- ac := &cobra.Command{
- Use: "alarm ",
- Short: "Alarm related commands",
- }
-
- ac.AddCommand(NewAlarmDisarmCommand())
- ac.AddCommand(NewAlarmListCommand())
-
- return ac
-}
-
-func NewAlarmDisarmCommand() *cobra.Command {
- cmd := cobra.Command{
- Use: "disarm",
- Short: "Disarms all alarms",
- Run: alarmDisarmCommandFunc,
- }
- return &cmd
-}
-
-// alarmDisarmCommandFunc executes the "alarm disarm" command.
-func alarmDisarmCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm disarm command accepts no arguments"))
- }
- ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).AlarmDisarm(ctx, &v3.AlarmMember{})
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- display.Alarm(*resp)
-}
-
-func NewAlarmListCommand() *cobra.Command {
- cmd := cobra.Command{
- Use: "list",
- Short: "Lists all alarms",
- Run: alarmListCommandFunc,
- }
- return &cmd
-}
-
-// alarmListCommandFunc executes the "alarm list" command.
-func alarmListCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm list command accepts no arguments"))
- }
- ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).AlarmList(ctx)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- display.Alarm(*resp)
-}
diff --git a/etcdctl/ctlv3/command/auth_command.go b/etcdctl/ctlv3/command/auth_command.go
index 0e443450013..ef61ea4b4ee 100644
--- a/etcdctl/ctlv3/command/auth_command.go
+++ b/etcdctl/ctlv3/command/auth_command.go
@@ -17,17 +17,16 @@ package command
import (
"fmt"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
-
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
)
// NewAuthCommand returns the cobra command for "auth".
func NewAuthCommand() *cobra.Command {
ac := &cobra.Command{
Use: "auth ",
- Short: "Enable or disable authentication",
+ Short: "启用或禁用身份验证",
}
ac.AddCommand(newAuthEnableCommand())
@@ -40,7 +39,7 @@ func NewAuthCommand() *cobra.Command {
func newAuthStatusCommand() *cobra.Command {
return &cobra.Command{
Use: "status",
- Short: "Returns authentication status",
+ Short: "返回验证状态",
Run: authStatusCommandFunc,
}
}
@@ -48,7 +47,7 @@ func newAuthStatusCommand() *cobra.Command {
// authStatusCommandFunc executes the "auth status" command.
func authStatusCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth status command does not accept any arguments"))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth status命令不接受任何参数"))
}
ctx, cancel := commandCtx(cmd)
@@ -64,7 +63,7 @@ func authStatusCommandFunc(cmd *cobra.Command, args []string) {
func newAuthEnableCommand() *cobra.Command {
return &cobra.Command{
Use: "enable",
- Short: "Enables authentication",
+ Short: "启用身份验证",
Run: authEnableCommandFunc,
}
}
@@ -72,7 +71,7 @@ func newAuthEnableCommand() *cobra.Command {
// authEnableCommandFunc executes the "auth enable" command.
func authEnableCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth enable command does not accept any arguments"))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth enable命令不接受任何参数"))
}
ctx, cancel := commandCtx(cmd)
@@ -96,13 +95,13 @@ func authEnableCommandFunc(cmd *cobra.Command, args []string) {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- fmt.Println("Authentication Enabled")
+ fmt.Println("身份验证启用")
}
func newAuthDisableCommand() *cobra.Command {
return &cobra.Command{
Use: "disable",
- Short: "Disables authentication",
+ Short: "禁用身份验证",
Run: authDisableCommandFunc,
}
}
@@ -110,7 +109,7 @@ func newAuthDisableCommand() *cobra.Command {
// authDisableCommandFunc executes the "auth disable" command.
func authDisableCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth disable command does not accept any arguments"))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("auth disable命令不接受任何参数"))
}
ctx, cancel := commandCtx(cmd)
@@ -120,5 +119,5 @@ func authDisableCommandFunc(cmd *cobra.Command, args []string) {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- fmt.Println("Authentication Disabled")
+ fmt.Println("身份验证禁用")
}
diff --git a/etcdctl/ctlv3/command/check.go b/etcdctl/ctlv3/command/check.go
index 354e78aa31b..3daf832a2bf 100644
--- a/etcdctl/ctlv3/command/check.go
+++ b/etcdctl/ctlv3/command/check.go
@@ -26,18 +26,21 @@ import (
"sync"
"time"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/pkg/v3/report"
+ "github.com/ls-2018/etcd_cn/code_debug/conf"
+
+ "gopkg.in/cheggaaa/pb.v1"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/ls-2018/etcd_cn/pkg/report"
- "github.com/cheggaaa/pb/v3"
"github.com/spf13/cobra"
"golang.org/x/time/rate"
)
var (
checkPerfLoad string
- checkPerfPrefix string
+ checkPerfPrefix string // key prefix for the data written during the check
checkDatascaleLoad string
checkDatascalePrefix string
autoCompact bool
@@ -45,8 +48,8 @@ var (
)
type checkPerfCfg struct {
- limit int
- clients int
+ limit int // request rate limit per second
+ clients int // number of clients
duration int
}
@@ -59,7 +62,7 @@ var checkPerfCfgMap = map[string]checkPerfCfg{
},
"m": {
limit: 1000,
- clients: 200,
+ clients: 200, //
duration: 60,
},
"l": {
@@ -108,7 +111,7 @@ var checkDatascaleCfgMap = map[string]checkDatascaleCfg{
func NewCheckCommand() *cobra.Command {
cc := &cobra.Command{
Use: "check ",
- Short: "commands for checking properties of the etcd cluster",
+ Short: "etcd集群属性检查命令",
}
cc.AddCommand(NewCheckPerfCommand())
@@ -121,25 +124,22 @@ func NewCheckCommand() *cobra.Command {
func NewCheckPerfCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "perf [options]",
- Short: "Check the performance of the etcd cluster",
+ Short: "查看etcd集群的性能",
Run: newCheckPerfCommand,
}
- // TODO: support customized configuration
- cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "The performance check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge). Different workload models use different configurations in terms of number of clients and expected throughtput.")
- cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "The prefix for writing the performance check's keys.")
- cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
- cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")
- cmd.RegisterFlagCompletionFunc("load", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
- return []string{"small", "medium", "large", "xLarge"}, cobra.ShellCompDirectiveDefault
- })
+ cmd.Flags().StringVar(&checkPerfLoad, "load", "s", "性能检查的工作负载模型.接受工作负载: s(small), m(medium), l(large), xl(xLarge)")
+ cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "写性能检查键的前缀.")
+ cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "测试完成后,压缩修订版本")
+ cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "测试完成后 碎片整理")
return cmd
}
// newCheckPerfCommand executes the "check perf" command.
func newCheckPerfCommand(cmd *cobra.Command, args []string) {
- var checkPerfAlias = map[string]string{
+ conf.Perf = true
+ checkPerfAlias := map[string]string{
"s": "s", "small": "s",
"m": "m", "medium": "m",
"l": "l", "large": "l",
@@ -152,40 +152,46 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
}
cfg := checkPerfCfgMap[model]
- requests := make(chan v3.Op, cfg.clients)
+ requests := make(chan v3.Op, cfg.clients) // concurrency: one slot per client
limit := rate.NewLimiter(rate.Limit(cfg.limit), 1)
cc := clientConfigFromCmd(cmd)
clients := make([]*v3.Client, cfg.clients)
for i := 0; i < cfg.clients; i++ {
- clients[i] = mustClient(cc)
+ clients[i] = cc.mustClient()
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second)
defer cancel()
- ctx, icancel := interruptableContext(ctx, func() { attemptCleanup(clients[0], false) })
+ ctx, icancel := interruptableContext(ctx, func() {
+ attemptCleanup(clients[0], false) // clean up the keys written by the check
+ })
defer icancel()
gctx, gcancel := context.WithCancel(ctx)
+ // check whether the prefix already has keys
resp, err := clients[0].Get(gctx, checkPerfPrefix, v3.WithPrefix(), v3.WithLimit(1))
gcancel()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
if len(resp.Kvs) > 0 {
- cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("prefix %q has keys. Delete with 'etcdctl del --prefix %s' first", checkPerfPrefix, checkPerfPrefix))
+ cobrautl.ExitWithError(cobrautl.ExitInvalidInput, fmt.Errorf("前缀 %q 有值了. Delete with 'etcdctl del --prefix %s' first", checkPerfPrefix, checkPerfPrefix))
}
ksize, vsize := 256, 1024
k, v := make([]byte, ksize), string(make([]byte, vsize))
+ // display
bar := pb.New(cfg.duration)
+ bar.Format("Bom !")
bar.Start()
r := report.NewReport("%4.4f")
var wg sync.WaitGroup
wg.Add(len(clients))
+
for i := range clients {
go func(c *v3.Client) {
defer wg.Done()
@@ -206,7 +212,7 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
}
close(requests)
}()
-
+ // countdown: advance the progress bar once per second
go func() {
for i := 0; i < cfg.duration; i++ {
time.Sleep(time.Second)
@@ -231,7 +237,7 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
ok = true
if len(s.ErrorDist) != 0 {
- fmt.Println("FAIL: too many errors")
+ fmt.Println("FAIL: 错误太多")
for k, v := range s.ErrorDist {
fmt.Printf("FAIL: ERROR(%v) -> %d\n", k, v)
}
@@ -239,19 +245,19 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
}
if s.RPS/float64(cfg.limit) <= 0.9 {
- fmt.Printf("FAIL: Throughput too low: %d writes/s\n", int(s.RPS)+1)
+ fmt.Printf("FAIL: 吞吐量太慢: %d writes/s\n", int(s.RPS)+1)
ok = false
} else {
- fmt.Printf("PASS: Throughput is %d writes/s\n", int(s.RPS)+1)
+ fmt.Printf("PASS: 吞吐量 is %d writes/s\n", int(s.RPS)+1)
}
if s.Slowest > 0.5 { // slowest request > 500ms
- fmt.Printf("Slowest request took too long: %fs\n", s.Slowest)
+ fmt.Printf("最慢的请求耗时太长: %fs\n", s.Slowest)
ok = false
} else {
fmt.Printf("PASS: Slowest request took %fs\n", s.Slowest)
}
if s.Stddev > 0.1 { // stddev > 100ms
- fmt.Printf("Stddev too high: %fs\n", s.Stddev)
+ fmt.Printf("Stddev太高: %fs\n", s.Stddev)
ok = false
} else {
fmt.Printf("PASS: Stddev is %fs\n", s.Stddev)
@@ -265,16 +271,17 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) {
}
}
+// attemptCleanup tries to clean up the keys written by the check
func attemptCleanup(client *v3.Client, autoCompact bool) {
dctx, dcancel := context.WithTimeout(context.Background(), 30*time.Second)
defer dcancel()
dresp, err := client.Delete(dctx, checkPerfPrefix, v3.WithPrefix())
if err != nil {
- fmt.Printf("FAIL: Cleanup failed during key deletion: ERROR(%v)\n", err)
+ fmt.Printf("FAIL:删除键时清除失败 : ERROR(%v)\n", err)
return
}
if autoCompact {
- compact(client, dresp.Header.Revision)
+ compact(client, dresp.Header.Revision) // compact up to this revision
}
}
@@ -297,22 +304,22 @@ func interruptableContext(ctx context.Context, attemptCleanup func()) (context.C
func NewCheckDatascaleCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "datascale [options]",
- Short: "Check the memory usage of holding data for different workloads on a given server endpoint.",
- Long: "If no endpoint is provided, localhost will be used. If multiple endpoints are provided, first endpoint will be used.",
+ Short: "检查给定etcd端点上保存不同工作负载的数据的内存使用情况.",
+ Long: "如果没有提供端点,则将使用localhost.如果提供了多个端点,则将使用第一个端点.",
Run: newCheckDatascaleCommand,
}
- cmd.Flags().StringVar(&checkDatascaleLoad, "load", "s", "The datascale check's workload model. Accepted workloads: s(small), m(medium), l(large), xl(xLarge)")
- cmd.Flags().StringVar(&checkDatascalePrefix, "prefix", "/etcdctl-check-datascale/", "The prefix for writing the datascale check's keys.")
- cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.")
- cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.")
+ cmd.Flags().StringVar(&checkDatascaleLoad, "load", "s", "数据刻度检查的工作负载模型.接受工作负载: s(small), m(medium), l(large), xl(xLarge)")
+ cmd.Flags().StringVar(&checkDatascalePrefix, "prefix", "/etcdctl-check-datascale/", "用于写入数据刻度校验键的前缀.")
+ cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "测试完成后压缩修订版本")
+ cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "测试完成后碎片整理")
return cmd
}
// newCheckDatascaleCommand executes the "check datascale" command.
func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
- var checkDatascaleAlias = map[string]string{
+ checkDatascaleAlias := map[string]string{
"s": "s", "small": "s",
"m": "m", "medium": "m",
"l": "l", "large": "l",
@@ -330,7 +337,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
cc := clientConfigFromCmd(cmd)
clients := make([]*v3.Client, cfg.clients)
for i := 0; i < cfg.clients; i++ {
- clients[i] = mustClient(cc)
+ clients[i] = cc.mustClient()
}
// get endpoints
@@ -361,12 +368,13 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
// get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations
bytesBefore := endpointMemoryMetrics(eps[0], sec)
if bytesBefore == 0 {
- fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.")
+ fmt.Println("FAIL: 在put操作之前无法读取process_resident_memory_bytes.")
os.Exit(cobrautl.ExitError)
}
- fmt.Println(fmt.Sprintf("Start data scale check for work load [%v key-value pairs, %v bytes per key-value, %v concurrent clients].", cfg.limit, cfg.kvSize, cfg.clients))
+ fmt.Println(fmt.Sprintf("启动工作负载的数据规模检查[%v key-value pairs, %v bytes per key-value, %v concurrent clients].", cfg.limit, cfg.kvSize, cfg.clients))
bar := pb.New(cfg.limit)
+ bar.Format("Bom !")
bar.Start()
for i := range clients {
diff --git a/etcdctl/ctlv3/command/compaction_command.go b/etcdctl/ctlv3/command/compaction_command.go
deleted file mode 100644
index 5c0bb1019a9..00000000000
--- a/etcdctl/ctlv3/command/compaction_command.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "strconv"
-
- "github.com/spf13/cobra"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-var compactPhysical bool
-
-// NewCompactionCommand returns the cobra command for "compaction".
-func NewCompactionCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "compaction [options] ",
- Short: "Compacts the event history in etcd",
- Run: compactionCommandFunc,
- }
- cmd.Flags().BoolVar(&compactPhysical, "physical", false, "'true' to wait for compaction to physically remove all old revisions")
- return cmd
-}
-
-// compactionCommandFunc executes the "compaction" command.
-func compactionCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("compaction command needs 1 argument"))
- }
-
- rev, err := strconv.ParseInt(args[0], 10, 64)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- var opts []clientv3.CompactOption
- if compactPhysical {
- opts = append(opts, clientv3.WithCompactPhysical())
- }
-
- c := mustClientFromCmd(cmd)
- ctx, cancel := commandCtx(cmd)
- _, cerr := c.Compact(ctx, rev, opts...)
- cancel()
- if cerr != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, cerr)
- }
- fmt.Println("compacted revision", rev)
-}
diff --git a/etcdctl/ctlv3/command/completion_command.go b/etcdctl/ctlv3/command/completion_command.go
deleted file mode 100644
index 66a213cd3a6..00000000000
--- a/etcdctl/ctlv3/command/completion_command.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "os"
-
- "github.com/spf13/cobra"
-)
-
-func NewCompletionCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "completion [bash|zsh|fish|powershell]",
- Short: "Generate completion script",
- Long: `To load completions:
-
-Bash:
-
- $ source <(etcdctl completion bash)
-
- # To load completions for each session, execute once:
- # Linux:
- $ etcdctl completion bash > /etc/bash_completion.d/etcdctl
- # macOS:
- $ etcdctl completion bash > /usr/local/etc/bash_completion.d/etcdctl
-
-Zsh:
-
- # If shell completion is not already enabled in your environment,
- # you will need to enable it. You can execute the following once:
-
- $ echo "autoload -U compinit; compinit" >> ~/.zshrc
-
- # To load completions for each session, execute once:
- $ etcdctl completion zsh > "${fpath[1]}/_etcdctl"
-
- # You will need to start a new shell for this setup to take effect.
-
-fish:
-
- $ etcdctl completion fish | source
-
- # To load completions for each session, execute once:
- $ etcdctl completion fish > ~/.config/fish/completions/etcdctl.fish
-
-PowerShell:
-
- PS> etcdctl completion powershell | Out-String | Invoke-Expression
-
- # To load completions for every new session, run:
- PS> etcdctl completion powershell > etcdctl.ps1
- # and source this file from your PowerShell profile.
-`,
- DisableFlagsInUseLine: true,
- ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
- Args: cobra.ExactValidArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- switch args[0] {
- case "bash":
- cmd.Root().GenBashCompletion(os.Stdout)
- case "zsh":
- cmd.Root().GenZshCompletion(os.Stdout)
- case "fish":
- cmd.Root().GenFishCompletion(os.Stdout, true)
- case "powershell":
- cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
- }
- },
- }
-
- return cmd
-}
diff --git a/etcdctl/ctlv3/command/defrag_command.go b/etcdctl/ctlv3/command/defrag_command.go
index 253847746a8..196a54e3df2 100644
--- a/etcdctl/ctlv3/command/defrag_command.go
+++ b/etcdctl/ctlv3/command/defrag_command.go
@@ -17,42 +17,46 @@ package command
import (
"fmt"
"os"
- "time"
+ "github.com/ls-2018/etcd_cn/etcdutl/etcdutl"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
-
- "go.etcd.io/etcd/pkg/v3/cobrautl"
)
-// NewDefragCommand returns the cobra command for "Defrag".
+var defragDataDir string
+
func NewDefragCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "defrag",
- Short: "Defragments the storage of the etcd members with given endpoints",
+ Short: "对给定端点的etcd成员的存储进行碎片整理",
Run: defragCommandFunc,
}
- cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list")
+ cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "使用集群成员列表中的所有端点")
+ cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "可选的.如果存在,对etcd不使用的数据目录进行碎片整理.")
return cmd
}
func defragCommandFunc(cmd *cobra.Command, args []string) {
+ if len(defragDataDir) > 0 {
+ fmt.Fprintf(os.Stderr, "Use `etcdutl defrag` instead. The --data-dir is going to be decommissioned in v3.6.\n\n")
+ err := etcdutl.DefragData(defragDataDir)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ }
+
failures := 0
- cfg := clientConfigFromCmd(cmd)
+ c := mustClientFromCmd(cmd)
for _, ep := range endpointsFromCluster(cmd) {
- cfg.Endpoints = []string{ep}
- c := mustClient(cfg)
ctx, cancel := commandCtx(cmd)
- start := time.Now()
_, err := c.Defragment(ctx, ep)
- d := time.Now().Sub(start)
cancel()
if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to defragment etcd member[%s]. took %s. (%v)\n", ep, d.String(), err)
+ fmt.Fprintf(os.Stderr, "整理etcd成员失败 [%s] (%v)\n", ep, err)
failures++
} else {
- fmt.Printf("Finished defragmenting etcd member[%s]. took %s\n", ep, d.String())
+ fmt.Printf("整理etcd成员完成[%s]\n", ep)
}
- c.Close()
}
if failures != 0 {
diff --git a/etcdctl/ctlv3/command/del_command.go b/etcdctl/ctlv3/command/del_command.go
deleted file mode 100644
index 51b7abb3edf..00000000000
--- a/etcdctl/ctlv3/command/del_command.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "os"
- "time"
-
- "github.com/spf13/cobra"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-var (
- delPrefix bool
- delPrevKV bool
- delFromKey bool
- delRange bool
-)
-
-// NewDelCommand returns the cobra command for "del".
-func NewDelCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "del [options] [range_end]",
- Short: "Removes the specified key or range of keys [key, range_end)",
- Run: delCommandFunc,
- }
-
- cmd.Flags().BoolVar(&delPrefix, "prefix", false, "delete keys with matching prefix")
- cmd.Flags().BoolVar(&delPrevKV, "prev-kv", false, "return deleted key-value pairs")
- cmd.Flags().BoolVar(&delFromKey, "from-key", false, "delete keys that are greater than or equal to the given key using byte compare")
- cmd.Flags().BoolVar(&delRange, "range", false, "delete range of keys")
- return cmd
-}
-
-// delCommandFunc executes the "del" command.
-func delCommandFunc(cmd *cobra.Command, args []string) {
- key, opts := getDelOp(args)
- ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).Delete(ctx, key, opts...)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- display.Del(*resp)
-}
-
-func getDelOp(args []string) (string, []clientv3.OpOption) {
- if len(args) == 0 || len(args) > 2 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("del command needs one argument as key and an optional argument as range_end"))
- }
-
- if delPrefix && delFromKey {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
- }
-
- var opts []clientv3.OpOption
- key := args[0]
- if len(args) > 1 {
- if delPrefix || delFromKey {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set"))
- }
- opts = append(opts, clientv3.WithRange(args[1]))
- if !delRange {
- fmt.Fprintf(os.Stderr, "Warning: Keys between %q and %q will be deleted. Please interrupt the command within next 2 seconds to cancel. "+
- "You can provide `--range` flag to avoid the delay.\n", args[0], args[1])
- time.Sleep(2 * time.Second)
- }
- }
-
- if delPrefix {
- if len(key) == 0 {
- key = "\x00"
- opts = append(opts, clientv3.WithFromKey())
- } else {
- opts = append(opts, clientv3.WithPrefix())
- }
- }
- if delPrevKV {
- opts = append(opts, clientv3.WithPrevKV())
- }
-
- if delFromKey {
- if len(key) == 0 {
- key = "\x00"
- }
- opts = append(opts, clientv3.WithFromKey())
- }
-
- return key, opts
-}
diff --git a/etcdctl/ctlv3/command/downgrade_command.go b/etcdctl/ctlv3/command/downgrade_command.go
deleted file mode 100644
index 8b6ab9cd19e..00000000000
--- a/etcdctl/ctlv3/command/downgrade_command.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "errors"
-
- "github.com/spf13/cobra"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-// NewDowngradeCommand returns the cobra command for "downgrade".
-func NewDowngradeCommand() *cobra.Command {
- dc := &cobra.Command{
- Use: "downgrade ",
- Short: "Downgrade related commands",
- }
-
- dc.AddCommand(NewDowngradeValidateCommand())
- dc.AddCommand(NewDowngradeEnableCommand())
- dc.AddCommand(NewDowngradeCancelCommand())
-
- return dc
-}
-
-// NewDowngradeValidateCommand returns the cobra command for "downgrade validate".
-func NewDowngradeValidateCommand() *cobra.Command {
- cc := &cobra.Command{
- Use: "validate ",
- Short: "Validate downgrade capability before starting downgrade",
-
- Run: downgradeValidateCommandFunc,
- }
- return cc
-}
-
-// NewDowngradeEnableCommand returns the cobra command for "downgrade enable".
-func NewDowngradeEnableCommand() *cobra.Command {
- cc := &cobra.Command{
- Use: "enable ",
- Short: "Start a downgrade action to cluster",
-
- Run: downgradeEnableCommandFunc,
- }
- return cc
-}
-
-// NewDowngradeCancelCommand returns the cobra command for "downgrade cancel".
-func NewDowngradeCancelCommand() *cobra.Command {
- cc := &cobra.Command{
- Use: "cancel",
- Short: "Cancel the ongoing downgrade action to cluster",
-
- Run: downgradeCancelCommandFunc,
- }
- return cc
-}
-
-// downgradeValidateCommandFunc executes the "downgrade validate" command.
-func downgradeValidateCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) < 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided"))
- }
- if len(args) > 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments"))
- }
- targetVersion := args[0]
-
- if len(targetVersion) == 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided"))
- }
-
- ctx, cancel := commandCtx(cmd)
- cli := mustClientFromCmd(cmd)
-
- resp, err := cli.Downgrade(ctx, clientv3.DowngradeValidate, targetVersion)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.DowngradeValidate(*resp)
-}
-
-// downgradeEnableCommandFunc executes the "downgrade enable" command.
-func downgradeEnableCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) < 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided"))
- }
- if len(args) > 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments"))
- }
- targetVersion := args[0]
-
- if len(targetVersion) == 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided"))
- }
-
- ctx, cancel := commandCtx(cmd)
- cli := mustClientFromCmd(cmd)
-
- resp, err := cli.Downgrade(ctx, clientv3.DowngradeEnable, targetVersion)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.DowngradeEnable(*resp)
-}
-
-// downgradeCancelCommandFunc executes the "downgrade cancel" command.
-func downgradeCancelCommandFunc(cmd *cobra.Command, args []string) {
- ctx, cancel := commandCtx(cmd)
- cli := mustClientFromCmd(cmd)
-
- resp, err := cli.Downgrade(ctx, clientv3.DowngradeCancel, "")
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.DowngradeCancel(*resp)
-}
diff --git a/etcdctl/ctlv3/command/elect_command.go b/etcdctl/ctlv3/command/elect_command.go
index 14feb13f5bf..15be42e8fe9 100644
--- a/etcdctl/ctlv3/command/elect_command.go
+++ b/etcdctl/ctlv3/command/elect_command.go
@@ -17,47 +17,47 @@ package command
import (
"context"
"errors"
+ "fmt"
"os"
"os/signal"
"syscall"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
)
-var (
- electListen bool
-)
+var electListen bool
// NewElectCommand returns the cobra command for "elect".
func NewElectCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "elect [proposal]",
- Short: "Observes and participates in leader election",
+ Short: "观察并参与leader选举",
Run: electCommandFunc,
}
- cmd.Flags().BoolVarP(&electListen, "listen", "l", false, "observation mode")
+ cmd.Flags().BoolVarP(&electListen, "listen", "l", false, "观察模式")
return cmd
}
func electCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 && len(args) != 2 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("elect takes one election name argument and an optional proposal argument"))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("elect -l id"))
}
c := mustClientFromCmd(cmd)
var err error
if len(args) == 1 {
if !electListen {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("no proposal argument but -l not set"))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("没有proposal参数,并且-l没有设置"))
}
err = observe(c, args[0])
} else {
if electListen {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("proposal given but -l is set"))
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("有proposal参数,但是-l设置了"))
}
err = campaign(c, args[0], args[1])
}
@@ -66,11 +66,13 @@ func electCommandFunc(cmd *cobra.Command, args []string) {
}
}
+// observe the election and print newly elected leaders
func observe(c *clientv3.Client, election string) error {
s, err := concurrency.NewSession(c)
if err != nil {
return err
}
+ fmt.Println("election:----->", election)
e := concurrency.NewElection(s, election)
ctx, cancel := context.WithCancel(context.TODO())
@@ -94,12 +96,13 @@ func observe(c *clientv3.Client, election string) error {
select {
case <-ctx.Done():
default:
- return errors.New("elect: observer lost")
+ return errors.New("elect: 观察者丢失")
}
return nil
}
+// campaign for leadership in the election
func campaign(c *clientv3.Client, election string, prop string) error {
s, err := concurrency.NewSession(c)
if err != nil {
@@ -121,7 +124,6 @@ func campaign(c *clientv3.Client, election string, prop string) error {
return err
}
- // print key since elected
resp, err := c.Get(ctx, e.Key())
if err != nil {
return err
@@ -131,7 +133,7 @@ func campaign(c *clientv3.Client, election string, prop string) error {
select {
case <-donec:
case <-s.Done():
- return errors.New("elect: session expired")
+ return errors.New("elect: 会话过期")
}
return e.Resign(context.TODO())
diff --git a/etcdctl/ctlv3/command/ep_command.go b/etcdctl/ctlv3/command/ep_command.go
deleted file mode 100644
index 0964f564c69..00000000000
--- a/etcdctl/ctlv3/command/ep_command.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "os"
- "sync"
- "time"
-
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/pkg/v3/flags"
-
- "github.com/spf13/cobra"
- "go.uber.org/zap"
-)
-
-var epClusterEndpoints bool
-var epHashKVRev int64
-
-// NewEndpointCommand returns the cobra command for "endpoint".
-func NewEndpointCommand() *cobra.Command {
- ec := &cobra.Command{
-		Use:   "endpoint <subcommand>",
- Short: "Endpoint related commands",
- }
-
- ec.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list")
- ec.AddCommand(newEpHealthCommand())
- ec.AddCommand(newEpStatusCommand())
- ec.AddCommand(newEpHashKVCommand())
-
- return ec
-}
-
-func newEpHealthCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "health",
- Short: "Checks the healthiness of endpoints specified in `--endpoints` flag",
- Run: epHealthCommandFunc,
- }
-
- return cmd
-}
-
-func newEpStatusCommand() *cobra.Command {
- return &cobra.Command{
- Use: "status",
- Short: "Prints out the status of endpoints specified in `--endpoints` flag",
- Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint.
-The items in the lists are endpoint, ID, version, db size, is leader, is learner, raft term, raft index, raft applied index, errors.
-`,
- Run: epStatusCommandFunc,
- }
-}
-
-func newEpHashKVCommand() *cobra.Command {
- hc := &cobra.Command{
- Use: "hashkv",
- Short: "Prints the KV history hash for each endpoint in --endpoints",
- Run: epHashKVCommandFunc,
- }
- hc.PersistentFlags().Int64Var(&epHashKVRev, "rev", 0, "maximum revision to hash (default: all revisions)")
- return hc
-}
-
-type epHealth struct {
- Ep string `json:"endpoint"`
- Health bool `json:"health"`
- Took string `json:"took"`
- Error string `json:"error,omitempty"`
-}
-
-// epHealthCommandFunc executes the "endpoint-health" command.
-func epHealthCommandFunc(cmd *cobra.Command, args []string) {
- lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- flags.SetPflagsFromEnv(lg, "ETCDCTL", cmd.InheritedFlags())
- initDisplayFromCmd(cmd)
-
- sec := secureCfgFromCmd(cmd)
- dt := dialTimeoutFromCmd(cmd)
- ka := keepAliveTimeFromCmd(cmd)
- kat := keepAliveTimeoutFromCmd(cmd)
- auth := authCfgFromCmd(cmd)
- var cfgs []*clientv3.Config
- for _, ep := range endpointsFromCluster(cmd) {
- cfg, err := clientv3.NewClientConfig(&clientv3.ConfigSpec{
- Endpoints: []string{ep},
- DialTimeout: dt,
- KeepAliveTime: ka,
- KeepAliveTimeout: kat,
- Secure: sec,
- Auth: auth,
- }, lg)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
- }
- cfgs = append(cfgs, cfg)
- }
-
- var wg sync.WaitGroup
- hch := make(chan epHealth, len(cfgs))
- for _, cfg := range cfgs {
- wg.Add(1)
- go func(cfg *clientv3.Config) {
- defer wg.Done()
- ep := cfg.Endpoints[0]
- cfg.Logger = lg.Named("client")
- cli, err := clientv3.New(*cfg)
- if err != nil {
- hch <- epHealth{Ep: ep, Health: false, Error: err.Error()}
- return
- }
- st := time.Now()
- // get a random key. As long as we can get the response without an error, the
- // endpoint is health.
- ctx, cancel := commandCtx(cmd)
- _, err = cli.Get(ctx, "health")
- eh := epHealth{Ep: ep, Health: false, Took: time.Since(st).String()}
- // permission denied is OK since proposal goes through consensus to get it
- if err == nil || err == rpctypes.ErrPermissionDenied {
- eh.Health = true
- } else {
- eh.Error = err.Error()
- }
-
- if eh.Health {
- resp, err := cli.AlarmList(ctx)
- if err == nil && len(resp.Alarms) > 0 {
- eh.Health = false
- eh.Error = "Active Alarm(s): "
- for _, v := range resp.Alarms {
- switch v.Alarm {
- case etcdserverpb.AlarmType_NOSPACE:
- eh.Error = eh.Error + "NOSPACE "
- case etcdserverpb.AlarmType_CORRUPT:
- eh.Error = eh.Error + "CORRUPT "
- default:
- eh.Error = eh.Error + "UNKNOWN "
- }
- }
- } else if err != nil {
- eh.Health = false
- eh.Error = "Unable to fetch the alarm list"
- }
- }
- cancel()
- hch <- eh
- }(cfg)
- }
-
- wg.Wait()
- close(hch)
-
- errs := false
- var healthList []epHealth
- for h := range hch {
- healthList = append(healthList, h)
- if h.Error != "" {
- errs = true
- }
- }
- display.EndpointHealth(healthList)
- if errs {
- cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("unhealthy cluster"))
- }
-}
-
-type epStatus struct {
- Ep string `json:"Endpoint"`
- Resp *clientv3.StatusResponse `json:"Status"`
-}
-
-func epStatusCommandFunc(cmd *cobra.Command, args []string) {
- cfg := clientConfigFromCmd(cmd)
-
- var statusList []epStatus
- var err error
- for _, ep := range endpointsFromCluster(cmd) {
- cfg.Endpoints = []string{ep}
- c := mustClient(cfg)
- ctx, cancel := commandCtx(cmd)
- resp, serr := c.Status(ctx, ep)
- cancel()
- c.Close()
- if serr != nil {
- err = serr
- fmt.Fprintf(os.Stderr, "Failed to get the status of endpoint %s (%v)\n", ep, serr)
- continue
- }
- statusList = append(statusList, epStatus{Ep: ep, Resp: resp})
- }
-
- display.EndpointStatus(statusList)
-
- if err != nil {
- os.Exit(cobrautl.ExitError)
- }
-}
-
-type epHashKV struct {
- Ep string `json:"Endpoint"`
- Resp *clientv3.HashKVResponse `json:"HashKV"`
-}
-
-func epHashKVCommandFunc(cmd *cobra.Command, args []string) {
- cfg := clientConfigFromCmd(cmd)
-
- var hashList []epHashKV
- var err error
- for _, ep := range endpointsFromCluster(cmd) {
- cfg.Endpoints = []string{ep}
- c := mustClient(cfg)
- ctx, cancel := commandCtx(cmd)
- resp, serr := c.HashKV(ctx, ep, epHashKVRev)
- cancel()
- c.Close()
- if serr != nil {
- err = serr
- fmt.Fprintf(os.Stderr, "Failed to get the hash of endpoint %s (%v)\n", ep, serr)
- continue
- }
- hashList = append(hashList, epHashKV{Ep: ep, Resp: resp})
- }
-
- display.EndpointHashKV(hashList)
-
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-}
-
-func endpointsFromCluster(cmd *cobra.Command) []string {
- if !epClusterEndpoints {
- endpoints, err := cmd.Flags().GetStringSlice("endpoints")
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- return endpoints
- }
-
- sec := secureCfgFromCmd(cmd)
- dt := dialTimeoutFromCmd(cmd)
- ka := keepAliveTimeFromCmd(cmd)
- kat := keepAliveTimeoutFromCmd(cmd)
- eps, err := endpointsFromCmd(cmd)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- // exclude auth for not asking needless password (MemberList() doesn't need authentication)
- lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
- cfg, err := clientv3.NewClientConfig(&clientv3.ConfigSpec{
- Endpoints: eps,
- DialTimeout: dt,
- KeepAliveTime: ka,
- KeepAliveTimeout: kat,
- Secure: sec,
- }, lg)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- c, err := clientv3.New(*cfg)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- ctx, cancel := commandCtx(cmd)
- defer func() {
- c.Close()
- cancel()
- }()
- membs, err := c.MemberList(ctx)
- if err != nil {
- err = fmt.Errorf("failed to fetch endpoints from etcd cluster member list: %v", err)
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- var ret []string
- for _, m := range membs.Members {
- ret = append(ret, m.ClientURLs...)
- }
- return ret
-}
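
The deleted ep_command.go reappears later in this patch as over_ep_command.go. For context, `endpoint status` reduces to one Status call per endpoint on the client's Maintenance API; a minimal sketch, assuming the upstream go.etcd.io/etcd/client/v3 import path and a placeholder endpoint:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Status is part of the Maintenance API; the CLI calls it once per endpoint.
	for _, ep := range cli.Endpoints() {
		resp, err := cli.Status(ctx, ep)
		if err != nil {
			fmt.Printf("%s: unreachable (%v)\n", ep, err)
			continue
		}
		fmt.Printf("%s: version=%s dbSize=%d leader=%x\n",
			ep, resp.Version, resp.DbSize, resp.Leader)
	}
}
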
diff --git a/etcdctl/ctlv3/command/get_command.go b/etcdctl/ctlv3/command/get_command.go
deleted file mode 100644
index a18cc32b97c..00000000000
--- a/etcdctl/ctlv3/command/get_command.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "strings"
-
- "github.com/spf13/cobra"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-var (
- getConsistency string
- getLimit int64
- getSortOrder string
- getSortTarget string
- getPrefix bool
- getFromKey bool
- getRev int64
- getKeysOnly bool
- getCountOnly bool
- printValueOnly bool
-)
-
-// NewGetCommand returns the cobra command for "get".
-func NewGetCommand() *cobra.Command {
- cmd := &cobra.Command{
-		Use:   "get [options] <key> [range_end]",
- Short: "Gets the key or a range of keys",
- Run: getCommandFunc,
- }
-
- cmd.Flags().StringVar(&getConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)")
- cmd.Flags().StringVar(&getSortOrder, "order", "", "Order of results; ASCEND or DESCEND (ASCEND by default)")
- cmd.Flags().StringVar(&getSortTarget, "sort-by", "", "Sort target; CREATE, KEY, MODIFY, VALUE, or VERSION")
- cmd.Flags().Int64Var(&getLimit, "limit", 0, "Maximum number of results")
- cmd.Flags().BoolVar(&getPrefix, "prefix", false, "Get keys with matching prefix")
- cmd.Flags().BoolVar(&getFromKey, "from-key", false, "Get keys that are greater than or equal to the given key using byte compare")
- cmd.Flags().Int64Var(&getRev, "rev", 0, "Specify the kv revision")
- cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys")
- cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "Get only the count")
- cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`)
-
- cmd.RegisterFlagCompletionFunc("consistency", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
- return []string{"l", "s"}, cobra.ShellCompDirectiveDefault
- })
- cmd.RegisterFlagCompletionFunc("order", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
- return []string{"ASCEND", "DESCEND"}, cobra.ShellCompDirectiveDefault
- })
- cmd.RegisterFlagCompletionFunc("sort-by", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
- return []string{"CREATE", "KEY", "MODIFY", "VALUE", "VERSION"}, cobra.ShellCompDirectiveDefault
- })
-
- return cmd
-}
-
-// getCommandFunc executes the "get" command.
-func getCommandFunc(cmd *cobra.Command, args []string) {
- key, opts := getGetOp(args)
- ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).Get(ctx, key, opts...)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- if getCountOnly {
- if _, fields := display.(*fieldsPrinter); !fields {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--count-only is only for `--write-out=fields`"))
- }
- }
-
- if printValueOnly {
- dp, simple := (display).(*simplePrinter)
- if !simple {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("print-value-only is only for `--write-out=simple`"))
- }
- dp.valueOnly = true
- }
- display.Get(*resp)
-}
-
-func getGetOp(args []string) (string, []clientv3.OpOption) {
- if len(args) == 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("get command needs one argument as key and an optional argument as range_end"))
- }
-
- if getPrefix && getFromKey {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
- }
-
- if getKeysOnly && getCountOnly {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one"))
- }
-
- var opts []clientv3.OpOption
- switch getConsistency {
- case "s":
- opts = append(opts, clientv3.WithSerializable())
- case "l":
- default:
- cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("unknown consistency flag %q", getConsistency))
- }
-
- key := args[0]
- if len(args) > 1 {
- if getPrefix || getFromKey {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set"))
- }
- opts = append(opts, clientv3.WithRange(args[1]))
- }
-
- opts = append(opts, clientv3.WithLimit(getLimit))
- if getRev > 0 {
- opts = append(opts, clientv3.WithRev(getRev))
- }
-
- sortByOrder := clientv3.SortNone
- sortOrder := strings.ToUpper(getSortOrder)
- switch {
- case sortOrder == "ASCEND":
- sortByOrder = clientv3.SortAscend
- case sortOrder == "DESCEND":
- sortByOrder = clientv3.SortDescend
- case sortOrder == "":
- // nothing
- default:
- cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort order %v", getSortOrder))
- }
-
- sortByTarget := clientv3.SortByKey
- sortTarget := strings.ToUpper(getSortTarget)
- switch {
- case sortTarget == "CREATE":
- sortByTarget = clientv3.SortByCreateRevision
- case sortTarget == "KEY":
- sortByTarget = clientv3.SortByKey
- case sortTarget == "MODIFY":
- sortByTarget = clientv3.SortByModRevision
- case sortTarget == "VALUE":
- sortByTarget = clientv3.SortByValue
- case sortTarget == "VERSION":
- sortByTarget = clientv3.SortByVersion
- case sortTarget == "":
- // nothing
- default:
- cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort target %v", getSortTarget))
- }
-
- opts = append(opts, clientv3.WithSort(sortByTarget, sortByOrder))
-
- if getPrefix {
- if len(key) == 0 {
- key = "\x00"
- opts = append(opts, clientv3.WithFromKey())
- } else {
- opts = append(opts, clientv3.WithPrefix())
- }
- }
-
- if getFromKey {
- if len(key) == 0 {
- key = "\x00"
- }
- opts = append(opts, clientv3.WithFromKey())
- }
-
- if getKeysOnly {
- opts = append(opts, clientv3.WithKeysOnly())
- }
-
- if getCountOnly {
- opts = append(opts, clientv3.WithCountOnly())
- }
-
- return key, opts
-}
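
The deleted get command translated its flags into clientv3 OpOptions on a single Range request. A hedged sketch of the equivalent library call, assuming the upstream go.etcd.io/etcd/client/v3 import path and a hypothetical key prefix "foo":

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Roughly what `etcdctl get foo --prefix --sort-by=KEY --limit=10` builds:
	// one Range request with the matching options.
	resp, err := cli.Get(ctx, "foo",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend),
		clientv3.WithLimit(10),
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}
}
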
diff --git a/etcdctl/ctlv3/command/global.go b/etcdctl/ctlv3/command/global.go
index 6997b94bdb6..a177ed535cb 100644
--- a/etcdctl/ctlv3/command/global.go
+++ b/etcdctl/ctlv3/command/global.go
@@ -15,22 +15,21 @@
package command
import (
+ "crypto/tls"
"errors"
"fmt"
"io"
+ "io/ioutil"
"os"
"strings"
"time"
"github.com/bgentry/speakeasy"
-
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/client/pkg/v3/srv"
- "go.etcd.io/etcd/client/pkg/v3/transport"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/pkg/v3/flags"
-
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/srv"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/transport"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/ls-2018/etcd_cn/pkg/flags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"go.uber.org/zap"
@@ -61,6 +60,21 @@ type GlobalFlags struct {
Debug bool
}
+type secureCfg struct {
+ cert string
+ key string
+ cacert string
+ serverName string
+
+ insecureTransport bool
+ insecureSkipVerify bool
+}
+
+type authCfg struct {
+ username string
+ password string
+}
+
type discoveryCfg struct {
domain string
insecure bool
@@ -83,14 +97,23 @@ func initDisplayFromCmd(cmd *cobra.Command) {
}
}
+type clientConfig struct {
+ endpoints []string
+ dialTimeout time.Duration
+ keepAliveTime time.Duration
+ keepAliveTimeout time.Duration
+ scfg *secureCfg
+ acfg *authCfg
+}
+
type discardValue struct{}
func (*discardValue) String() string { return "" }
func (*discardValue) Set(string) error { return nil }
func (*discardValue) Type() string { return "" }
-func clientConfigFromCmd(cmd *cobra.Command) *clientv3.ConfigSpec {
- lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+func clientConfigFromCmd(cmd *cobra.Command) *clientConfig {
+ lg, err := zap.NewProduction()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
@@ -117,21 +140,21 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientv3.ConfigSpec {
// too many routine connection disconnects to turn on by default.
//
// See https://github.com/etcd-io/etcd/pull/9623 for background
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr))
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr))
}
- cfg := &clientv3.ConfigSpec{}
- cfg.Endpoints, err = endpointsFromCmd(cmd)
+ cfg := &clientConfig{}
+ cfg.endpoints, err = endpointsFromCmd(cmd)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- cfg.DialTimeout = dialTimeoutFromCmd(cmd)
- cfg.KeepAliveTime = keepAliveTimeFromCmd(cmd)
- cfg.KeepAliveTimeout = keepAliveTimeoutFromCmd(cmd)
+ cfg.dialTimeout = dialTimeoutFromCmd(cmd)
+ cfg.keepAliveTime = keepAliveTimeFromCmd(cmd)
+ cfg.keepAliveTimeout = keepAliveTimeoutFromCmd(cmd)
- cfg.Secure = secureCfgFromCmd(cmd)
- cfg.Auth = authCfgFromCmd(cmd)
+ cfg.scfg = secureCfgFromCmd(cmd)
+ cfg.acfg = authCfgFromCmd(cmd)
initDisplayFromCmd(cmd)
return cfg
@@ -139,8 +162,7 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientv3.ConfigSpec {
func mustClientCfgFromCmd(cmd *cobra.Command) *clientv3.Config {
cc := clientConfigFromCmd(cmd)
- lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
- cfg, err := clientv3.NewClientConfig(cc, lg)
+ cfg, err := newClientCfg(cc.endpoints, cc.dialTimeout, cc.keepAliveTime, cc.keepAliveTimeout, cc.scfg, cc.acfg)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
@@ -148,13 +170,12 @@ func mustClientCfgFromCmd(cmd *cobra.Command) *clientv3.Config {
}
func mustClientFromCmd(cmd *cobra.Command) *clientv3.Client {
- cfg := clientConfigFromCmd(cmd)
- return mustClient(cfg)
+ cfg := clientConfigFromCmd(cmd) // ok
+ return cfg.mustClient()
}
-func mustClient(cc *clientv3.ConfigSpec) *clientv3.Client {
- lg, _ := logutil.CreateDefaultZapLogger(zap.InfoLevel)
- cfg, err := clientv3.NewClientConfig(cc, lg)
+func (cc *clientConfig) mustClient() *clientv3.Client {
+ cfg, err := newClientCfg(cc.endpoints, cc.dialTimeout, cc.keepAliveTime, cc.keepAliveTimeout, cc.scfg, cc.acfg)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
@@ -167,11 +188,71 @@ func mustClient(cc *clientv3.ConfigSpec) *clientv3.Client {
return client
}
+func newClientCfg(endpoints []string, dialTimeout, keepAliveTime, keepAliveTimeout time.Duration, scfg *secureCfg, acfg *authCfg) (*clientv3.Config, error) {
+ var cfgtls *transport.TLSInfo
+ tlsinfo := transport.TLSInfo{}
+ tlsinfo.Logger, _ = zap.NewProduction()
+ if scfg.cert != "" {
+ tlsinfo.CertFile = scfg.cert
+ cfgtls = &tlsinfo
+ }
+
+ if scfg.key != "" {
+ tlsinfo.KeyFile = scfg.key
+ cfgtls = &tlsinfo
+ }
+
+ if scfg.cacert != "" {
+ tlsinfo.TrustedCAFile = scfg.cacert
+ cfgtls = &tlsinfo
+ }
+
+ if scfg.serverName != "" {
+ tlsinfo.ServerName = scfg.serverName
+ cfgtls = &tlsinfo
+ }
+
+ cfg := &clientv3.Config{
+ Endpoints: endpoints,
+ DialTimeout: dialTimeout,
+ DialKeepAliveTime: keepAliveTime,
+ DialKeepAliveTimeout: keepAliveTimeout,
+ }
+
+ if cfgtls != nil {
+ clientTLS, err := cfgtls.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ cfg.TLS = clientTLS
+ }
+
+ // if key/cert is not given but user wants secure connection, we
+ // should still setup an empty tls configuration for gRPC to setup
+ // secure connection.
+ if cfg.TLS == nil && !scfg.insecureTransport {
+ cfg.TLS = &tls.Config{}
+ }
+
+ // If the user wants to skip TLS verification then we should set
+ // the InsecureSkipVerify flag in tls configuration.
+ if scfg.insecureSkipVerify && cfg.TLS != nil {
+ cfg.TLS.InsecureSkipVerify = true
+ }
+
+ if acfg != nil {
+ cfg.Username = acfg.username
+ cfg.Password = acfg.password
+ }
+
+ return cfg, nil
+}
+
func argOrStdin(args []string, stdin io.Reader, i int) (string, error) {
if i < len(args) {
return args[i], nil
}
- bytes, err := io.ReadAll(stdin)
+ bytes, err := ioutil.ReadAll(stdin)
if string(bytes) == "" || err != nil {
return "", errors.New("no available argument and stdin")
}
@@ -202,7 +283,7 @@ func keepAliveTimeoutFromCmd(cmd *cobra.Command) time.Duration {
return keepAliveTimeout
}
-func secureCfgFromCmd(cmd *cobra.Command) *clientv3.SecureConfig {
+func secureCfgFromCmd(cmd *cobra.Command) *secureCfg {
cert, key, cacert := keyAndCertFromCmd(cmd)
insecureTr := insecureTransportFromCmd(cmd)
skipVerify := insecureSkipVerifyFromCmd(cmd)
@@ -212,14 +293,14 @@ func secureCfgFromCmd(cmd *cobra.Command) *clientv3.SecureConfig {
discoveryCfg.domain = ""
}
- return &clientv3.SecureConfig{
- Cert: cert,
- Key: key,
- Cacert: cacert,
- ServerName: discoveryCfg.domain,
+ return &secureCfg{
+ cert: cert,
+ key: key,
+ cacert: cacert,
+ serverName: discoveryCfg.domain,
- InsecureTransport: insecureTr,
- InsecureSkipVerify: skipVerify,
+ insecureTransport: insecureTr,
+ insecureSkipVerify: skipVerify,
}
}
@@ -262,7 +343,7 @@ func keyAndCertFromCmd(cmd *cobra.Command) (cert, key, cacert string) {
return cert, key, cacert
}
-func authCfgFromCmd(cmd *cobra.Command) *clientv3.AuthConfig {
+func authCfgFromCmd(cmd *cobra.Command) *authCfg {
userFlag, err := cmd.Flags().GetString("user")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
@@ -276,62 +357,58 @@ func authCfgFromCmd(cmd *cobra.Command) *clientv3.AuthConfig {
return nil
}
- var cfg clientv3.AuthConfig
+ var cfg authCfg
if passwordFlag == "" {
splitted := strings.SplitN(userFlag, ":", 2)
if len(splitted) < 2 {
- cfg.Username = userFlag
- cfg.Password, err = speakeasy.Ask("Password: ")
+ cfg.username = userFlag
+ cfg.password, err = speakeasy.Ask("Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
- cfg.Username = splitted[0]
- cfg.Password = splitted[1]
+ cfg.username = splitted[0]
+ cfg.password = splitted[1]
}
} else {
- cfg.Username = userFlag
- cfg.Password = passwordFlag
+ cfg.username = userFlag
+ cfg.password = passwordFlag
}
return &cfg
}
-func insecureDiscoveryFromCmd(cmd *cobra.Command) bool {
- discovery, err := cmd.Flags().GetBool("insecure-discovery")
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- return discovery
-}
+func endpointsFromFlagValue(cmd *cobra.Command) ([]string, error) {
+ discoveryCfg := discoveryCfgFromCmd(cmd)
-func discoverySrvFromCmd(cmd *cobra.Command) string {
- domainStr, err := cmd.Flags().GetString("discovery-srv")
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
+ // If we still don't have domain discovery, return nothing
+ if discoveryCfg.domain == "" {
+ return []string{}, nil
}
- return domainStr
-}
-func discoveryDNSClusterServiceNameFromCmd(cmd *cobra.Command) string {
- serviceNameStr, err := cmd.Flags().GetString("discovery-srv-name")
+ srvs, err := srv.GetClient("etcd-client", discoveryCfg.domain, discoveryCfg.serviceName)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
+ return nil, err
}
- return serviceNameStr
-}
-
-func discoveryCfgFromCmd(cmd *cobra.Command) *discoveryCfg {
- return &discoveryCfg{
- domain: discoverySrvFromCmd(cmd),
- insecure: insecureDiscoveryFromCmd(cmd),
- serviceName: discoveryDNSClusterServiceNameFromCmd(cmd),
+ eps := srvs.Endpoints
+ if discoveryCfg.insecure {
+ return eps, err
+ }
+ // strip insecure connections
+ ret := []string{}
+ for _, ep := range eps {
+ if strings.HasPrefix(ep, "http://") {
+ fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep)
+ continue
+ }
+ ret = append(ret, ep)
}
+ return ret, err
}
func endpointsFromCmd(cmd *cobra.Command) ([]string, error) {
- eps, err := endpointsFromFlagValue(cmd)
+ eps, err := endpointsFromFlagValue(cmd) // 获取endpoints
if err != nil {
return nil, err
}
@@ -347,30 +424,34 @@ func endpointsFromCmd(cmd *cobra.Command) ([]string, error) {
return eps, err
}
-func endpointsFromFlagValue(cmd *cobra.Command) ([]string, error) {
- discoveryCfg := discoveryCfgFromCmd(cmd)
-
- // If we still don't have domain discovery, return nothing
- if discoveryCfg.domain == "" {
- return []string{}, nil
+func insecureDiscoveryFromCmd(cmd *cobra.Command) bool {
+ discovery, err := cmd.Flags().GetBool("insecure-discovery")
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
}
+ return discovery
+}
- srvs, err := srv.GetClient("etcd-client", discoveryCfg.domain, discoveryCfg.serviceName)
+func discoverySrvFromCmd(cmd *cobra.Command) string {
+ domainStr, err := cmd.Flags().GetString("discovery-srv")
if err != nil {
- return nil, err
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
- eps := srvs.Endpoints
- if discoveryCfg.insecure {
- return eps, err
+ return domainStr
+}
+
+func discoveryDNSClusterServiceNameFromCmd(cmd *cobra.Command) string {
+ serviceNameStr, err := cmd.Flags().GetString("discovery-srv-name")
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
- // strip insecure connections
- var ret []string
- for _, ep := range eps {
- if strings.HasPrefix(ep, "http://") {
- fmt.Fprintf(os.Stderr, "ignoring discovered insecure endpoint %q\n", ep)
- continue
- }
- ret = append(ret, ep)
+ return serviceNameStr
+}
+
+func discoveryCfgFromCmd(cmd *cobra.Command) *discoveryCfg {
+ return &discoveryCfg{
+ domain: discoverySrvFromCmd(cmd), // string
+ insecure: insecureDiscoveryFromCmd(cmd), // bool
+ serviceName: discoveryDNSClusterServiceNameFromCmd(cmd), // string
}
- return ret, err
}
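
The newClientCfg helper introduced above reproduces what the upstream clientv3.NewClientConfig path did: build a transport.TLSInfo from the --cert/--key/--cacert flags, convert it to a *tls.Config, and populate clientv3.Config together with the keepalive and auth settings. A minimal standalone sketch, assuming the upstream import paths (the fork's client_sdk packages mirror them); the certificate file names and credentials are placeholders:

package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/client/pkg/v3/transport"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Same steps as newClientCfg: flags -> TLSInfo -> *tls.Config -> clientv3.Config.
	tlsInfo := transport.TLSInfo{
		CertFile:      "client.crt",
		KeyFile:       "client.key",
		TrustedCAFile: "ca.crt",
	}
	tlsConfig, err := tlsInfo.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}

	cli, err := clientv3.New(clientv3.Config{
		Endpoints:            []string{"https://127.0.0.1:2379"},
		DialTimeout:          2 * time.Second,
		DialKeepAliveTime:    2 * time.Second,
		DialKeepAliveTimeout: 6 * time.Second,
		TLS:                  tlsConfig,
		Username:             "root",   // only needed when auth is enabled
		Password:             "secret",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	log.Println("connected to", cli.Endpoints())
}
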
diff --git a/etcdctl/ctlv3/command/lease_command.go b/etcdctl/ctlv3/command/lease_command.go
deleted file mode 100644
index 97cacdfaf30..00000000000
--- a/etcdctl/ctlv3/command/lease_command.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "context"
- "fmt"
- "strconv"
-
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "github.com/spf13/cobra"
-)
-
-// NewLeaseCommand returns the cobra command for "lease".
-func NewLeaseCommand() *cobra.Command {
- lc := &cobra.Command{
-		Use:   "lease <subcommand>",
- Short: "Lease related commands",
- }
-
- lc.AddCommand(NewLeaseGrantCommand())
- lc.AddCommand(NewLeaseRevokeCommand())
- lc.AddCommand(NewLeaseTimeToLiveCommand())
- lc.AddCommand(NewLeaseListCommand())
- lc.AddCommand(NewLeaseKeepAliveCommand())
-
- return lc
-}
-
-// NewLeaseGrantCommand returns the cobra command for "lease grant".
-func NewLeaseGrantCommand() *cobra.Command {
- lc := &cobra.Command{
-		Use:   "grant <ttl>",
- Short: "Creates leases",
-
- Run: leaseGrantCommandFunc,
- }
-
- return lc
-}
-
-// leaseGrantCommandFunc executes the "lease grant" command.
-func leaseGrantCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease grant command needs TTL argument"))
- }
-
- ttl, err := strconv.ParseInt(args[0], 10, 64)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad TTL (%v)", err))
- }
-
- ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).Grant(ctx, ttl)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to grant lease (%v)", err))
- }
- display.Grant(*resp)
-}
-
-// NewLeaseRevokeCommand returns the cobra command for "lease revoke".
-func NewLeaseRevokeCommand() *cobra.Command {
- lc := &cobra.Command{
-		Use:   "revoke <leaseID>",
- Short: "Revokes leases",
-
- Run: leaseRevokeCommandFunc,
- }
-
- return lc
-}
-
-// leaseRevokeCommandFunc executes the "lease grant" command.
-func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease revoke command needs 1 argument"))
- }
-
- id := leaseFromArgs(args[0])
- ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).Revoke(ctx, id)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to revoke lease (%v)", err))
- }
- display.Revoke(id, *resp)
-}
-
-var timeToLiveKeys bool
-
-// NewLeaseTimeToLiveCommand returns the cobra command for "lease timetolive".
-func NewLeaseTimeToLiveCommand() *cobra.Command {
- lc := &cobra.Command{
-		Use:   "timetolive <leaseID> [options]",
- Short: "Get lease information",
-
- Run: leaseTimeToLiveCommandFunc,
- }
- lc.Flags().BoolVar(&timeToLiveKeys, "keys", false, "Get keys attached to this lease")
-
- return lc
-}
-
-// leaseTimeToLiveCommandFunc executes the "lease timetolive" command.
-func leaseTimeToLiveCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease timetolive command needs lease ID as argument"))
- }
- var opts []v3.LeaseOption
- if timeToLiveKeys {
- opts = append(opts, v3.WithAttachedKeys())
- }
- resp, rerr := mustClientFromCmd(cmd).TimeToLive(context.TODO(), leaseFromArgs(args[0]), opts...)
- if rerr != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr)
- }
- display.TimeToLive(*resp, timeToLiveKeys)
-}
-
-// NewLeaseListCommand returns the cobra command for "lease list".
-func NewLeaseListCommand() *cobra.Command {
- lc := &cobra.Command{
- Use: "list",
- Short: "List all active leases",
- Run: leaseListCommandFunc,
- }
- return lc
-}
-
-// leaseListCommandFunc executes the "lease list" command.
-func leaseListCommandFunc(cmd *cobra.Command, args []string) {
- resp, rerr := mustClientFromCmd(cmd).Leases(context.TODO())
- if rerr != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr)
- }
- display.Leases(*resp)
-}
-
-var (
- leaseKeepAliveOnce bool
-)
-
-// NewLeaseKeepAliveCommand returns the cobra command for "lease keep-alive".
-func NewLeaseKeepAliveCommand() *cobra.Command {
- lc := &cobra.Command{
-		Use:   "keep-alive [options] <leaseID>",
- Short: "Keeps leases alive (renew)",
-
- Run: leaseKeepAliveCommandFunc,
- }
-
- lc.Flags().BoolVar(&leaseKeepAliveOnce, "once", false, "Resets the keep-alive time to its original value and cobrautl.Exits immediately")
-
- return lc
-}
-
-// leaseKeepAliveCommandFunc executes the "lease keep-alive" command.
-func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease keep-alive command needs lease ID as argument"))
- }
-
- id := leaseFromArgs(args[0])
-
- if leaseKeepAliveOnce {
- respc, kerr := mustClientFromCmd(cmd).KeepAliveOnce(context.TODO(), id)
- if kerr != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr)
- }
- display.KeepAlive(*respc)
- return
- }
-
- respc, kerr := mustClientFromCmd(cmd).KeepAlive(context.TODO(), id)
- if kerr != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr)
- }
- for resp := range respc {
- display.KeepAlive(*resp)
- }
-
- if _, ok := (display).(*simplePrinter); ok {
- fmt.Printf("lease %016x expired or revoked.\n", id)
- }
-}
-
-func leaseFromArgs(arg string) v3.LeaseID {
- id, err := strconv.ParseInt(arg, 16, 64)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
- }
- return v3.LeaseID(id)
-}
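
Each of the deleted lease subcommands maps onto one Lease API call (Grant, Revoke, TimeToLive, Leases, KeepAlive). A short sketch of grant, attach, and timetolive, assuming the upstream go.etcd.io/etcd/client/v3 import path; the key and TTL are illustrative:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// `lease grant 30` is a Grant call; the returned ID is what the other
	// subcommands (revoke, timetolive, keep-alive) operate on.
	grant, err := cli.Grant(ctx, 30)
	if err != nil {
		log.Fatal(err)
	}

	// Attach a key to the lease; it is removed when the lease expires.
	if _, err := cli.Put(ctx, "/svc/instance-1", "alive", clientv3.WithLease(grant.ID)); err != nil {
		log.Fatal(err)
	}

	// `lease timetolive <leaseID> --keys` maps to TimeToLive with WithAttachedKeys.
	ttl, err := cli.TimeToLive(ctx, grant.ID, clientv3.WithAttachedKeys())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("lease %016x ttl=%ds keys=%q\n", ttl.ID, ttl.TTL, ttl.Keys)
}
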
diff --git a/etcdctl/ctlv3/command/lock_command.go b/etcdctl/ctlv3/command/lock_command.go
index 0a3d866cdbc..064f9dfaaa9 100644
--- a/etcdctl/ctlv3/command/lock_command.go
+++ b/etcdctl/ctlv3/command/lock_command.go
@@ -23,9 +23,10 @@ import (
"os/signal"
"syscall"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/concurrency"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/concurrency"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
)
@@ -36,7 +37,7 @@ var lockTTL = 10
func NewLockCommand() *cobra.Command {
c := &cobra.Command{
		Use:   "lock <lockname> [exec-command arg1 arg2 ...]",
- Short: "Acquires a named lock",
+ Short: "获取命名锁",
Run: lockCommandFunc,
}
c.Flags().IntVarP(&lockTTL, "ttl", "", lockTTL, "timeout for session")
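
The lock command relies on concurrency.NewSession plus concurrency.NewMutex; the --ttl flag above becomes the session lease TTL, so a crashed holder loses the lock after roughly that many seconds. A minimal sketch, assuming the upstream go.etcd.io/etcd/client/v3 import path and a hypothetical lock prefix:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// WithTTL corresponds to the --ttl flag shown above.
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock")
	ctx := context.Background()

	if err := m.Lock(ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println("holding lock, key:", m.Key())

	// ... critical section ...

	if err := m.Unlock(ctx); err != nil {
		log.Fatal(err)
	}
}
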
diff --git a/etcdctl/ctlv3/command/make_mirror_command.go b/etcdctl/ctlv3/command/make_mirror_command.go
index 1665330e835..aaf30fe793e 100644
--- a/etcdctl/ctlv3/command/make_mirror_command.go
+++ b/etcdctl/ctlv3/command/make_mirror_command.go
@@ -23,21 +23,16 @@ import (
"time"
"github.com/bgentry/speakeasy"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
- "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/mirror"
+ "github.com/ls-2018/etcd_cn/client_sdk/v3/mirror"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
"github.com/spf13/cobra"
)
-const (
- defaultMaxTxnOps = uint(128)
-)
-
var (
mminsecureTr bool
mmcert string
@@ -48,57 +43,52 @@ var (
mmuser string
mmpassword string
mmnodestprefix bool
- mmrev int64
- mmmaxTxnOps uint
)
// NewMakeMirrorCommand returns the cobra command for "makeMirror".
func NewMakeMirrorCommand() *cobra.Command {
c := &cobra.Command{
		Use:   "make-mirror [options] <destination>",
- Short: "Makes a mirror at the destination etcd cluster",
+ Short: "在目标etcd集群上创建镜像",
Run: makeMirrorCommandFunc,
}
- c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror")
- c.Flags().Int64Var(&mmrev, "rev", 0, "Specify the kv revision to start to mirror")
- c.Flags().UintVar(&mmmaxTxnOps, "max-txn-ops", defaultMaxTxnOps, "Maximum number of operations permitted in a transaction during syncing updates.")
- c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster")
- c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster")
- c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster")
- c.Flags().StringVar(&mmkey, "dest-key", "", "Identify secure client using this TLS key file")
- c.Flags().StringVar(&mmcacert, "dest-cacert", "", "Verify certificates of TLS enabled secure servers using this CA bundle")
- // TODO: secure by default when etcd enables secure gRPC by default.
- c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "Disable transport security for client connections")
- c.Flags().StringVar(&mmuser, "dest-user", "", "Destination username[:password] for authentication (prompt if password is not supplied)")
- c.Flags().StringVar(&mmpassword, "dest-password", "", "Destination password for authentication (if this option is used, --user option shouldn't include password)")
+ c.Flags().StringVar(&mmprefix, "prefix", "", "为那个前缀打快照")
+ c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "将一个source前缀 镜像到 目标集群中的另一个前缀")
+ c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "kv镜像到另一个集群的根目录下")
+ c.Flags().StringVar(&mmcert, "dest-cert", "", "使用此TLS证书文件为目标集群识别安全客户端")
+ c.Flags().StringVar(&mmkey, "dest-key", "", "使用此TLS私钥文件为目标集群识别安全客户端")
+ c.Flags().StringVar(&mmcacert, "dest-cacert", "", "使用此CA包验证启用TLS的安全服务器的证书")
+ c.Flags().BoolVar(&mminsecureTr, "dest-insecure-transport", true, "为客户端连接禁用传输安全性")
+ c.Flags().StringVar(&mmuser, "dest-user", "", "目标集群的 username[:password]")
+ c.Flags().StringVar(&mmpassword, "dest-password", "", "目标集群的密码")
return c
}
-func authDestCfg() *clientv3.AuthConfig {
+func authDestCfg() *authCfg {
if mmuser == "" {
return nil
}
- var cfg clientv3.AuthConfig
+ var cfg authCfg
if mmpassword == "" {
splitted := strings.SplitN(mmuser, ":", 2)
if len(splitted) < 2 {
var err error
- cfg.Username = mmuser
- cfg.Password, err = speakeasy.Ask("Destination Password: ")
+ cfg.username = mmuser
+ cfg.password, err = speakeasy.Ask("Destination Password: ")
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
} else {
- cfg.Username = splitted[0]
- cfg.Password = splitted[1]
+ cfg.username = splitted[0]
+ cfg.password = splitted[1]
}
} else {
- cfg.Username = mmuser
- cfg.Password = mmpassword
+ cfg.username = mmuser
+ cfg.password = mmpassword
}
return &cfg
@@ -112,24 +102,24 @@ func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
dialTimeout := dialTimeoutFromCmd(cmd)
keepAliveTime := keepAliveTimeFromCmd(cmd)
keepAliveTimeout := keepAliveTimeoutFromCmd(cmd)
- sec := &clientv3.SecureConfig{
- Cert: mmcert,
- Key: mmkey,
- Cacert: mmcacert,
- InsecureTransport: mminsecureTr,
+ sec := &secureCfg{
+ cert: mmcert,
+ key: mmkey,
+ cacert: mmcacert,
+ insecureTransport: mminsecureTr,
}
auth := authDestCfg()
- cc := &clientv3.ConfigSpec{
- Endpoints: []string{args[0]},
- DialTimeout: dialTimeout,
- KeepAliveTime: keepAliveTime,
- KeepAliveTimeout: keepAliveTimeout,
- Secure: sec,
- Auth: auth,
+ cc := &clientConfig{
+ endpoints: []string{args[0]},
+ dialTimeout: dialTimeout,
+ keepAliveTime: keepAliveTime,
+ keepAliveTimeout: keepAliveTimeout,
+ scfg: sec,
+ acfg: auth,
}
- dc := mustClient(cc)
+ dc := cc.mustClient() // 目标集群
c := mustClientFromCmd(cmd)
err := makeMirror(context.TODO(), c, dc)
@@ -139,49 +129,40 @@ func makeMirrorCommandFunc(cmd *cobra.Command, args []string) {
func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error {
total := int64(0)
- // if destination prefix is specified and remove destination prefix is true return error
- if mmnodestprefix && len(mmdestprefix) > 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
- }
-
go func() {
for {
time.Sleep(30 * time.Second)
- fmt.Println(atomic.LoadInt64(&total))
+ fmt.Println("total--->:", atomic.LoadInt64(&total))
}
}()
- startRev := mmrev - 1
- if startRev < 0 {
- startRev = 0
- }
+ s := mirror.NewSyncer(c, mmprefix, 0)
- s := mirror.NewSyncer(c, mmprefix, startRev)
+ rc, errc := s.SyncBase(ctx)
- // If a rev is provided, then do not sync the whole key space.
- // Instead, just start watching the key space starting from the rev
- if startRev == 0 {
- rc, errc := s.SyncBase(ctx)
+ // 如果指定并删除目的前缀,则返回错误
+ if mmnodestprefix && len(mmdestprefix) > 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one"))
+ }
- // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
- if !mmnodestprefix && len(mmdestprefix) == 0 {
- mmdestprefix = mmprefix
- }
+ // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix
+ if !mmnodestprefix && len(mmdestprefix) == 0 {
+ mmdestprefix = mmprefix
+ }
- for r := range rc {
- for _, kv := range r.Kvs {
- _, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value))
- if err != nil {
- return err
- }
- atomic.AddInt64(&total, 1)
+ for r := range rc {
+ for _, kv := range r.Kvs {
+ _, err := dc.Put(ctx, modifyPrefix(kv.Key), kv.Value)
+ if err != nil {
+ return err
}
+ atomic.AddInt64(&total, 1)
}
+ }
- err := <-errc
- if err != nil {
- return err
- }
+ err := <-errc
+ if err != nil {
+ return err
}
wc := s.SyncUpdates(ctx)
@@ -204,21 +185,12 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er
ops = []clientv3.Op{}
}
lastRev = nextRev
-
- if len(ops) == int(mmmaxTxnOps) {
- _, err := dc.Txn(ctx).Then(ops...).Commit()
- if err != nil {
- return err
- }
- ops = []clientv3.Op{}
- }
-
switch ev.Type {
case mvccpb.PUT:
- ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
+ ops = append(ops, clientv3.OpPut(modifyPrefix(ev.Kv.Key), ev.Kv.Value))
atomic.AddInt64(&total, 1)
case mvccpb.DELETE:
- ops = append(ops, clientv3.OpDelete(modifyPrefix(string(ev.Kv.Key))))
+ ops = append(ops, clientv3.OpDelete(modifyPrefix(ev.Kv.Key)))
atomic.AddInt64(&total, 1)
default:
panic("unexpected event type")
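
The rewritten makeMirror keeps the same two-phase shape as upstream, a SyncBase pass over the existing keyspace followed by SyncUpdates watches, while dropping the --rev and --max-txn-ops handling. A simplified sketch of that two-phase flow with the mirror package, assuming the upstream import paths; both endpoints are placeholders and the destination port is hypothetical:

package main

import (
	"context"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/mirror"
)

func main() {
	src, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	dst, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:3379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	ctx := context.Background()
	s := mirror.NewSyncer(src, "/app/", 0)

	// Phase 1: copy the existing keyspace under the prefix.
	respc, errc := s.SyncBase(ctx)
	for resp := range respc {
		for _, kv := range resp.Kvs {
			if _, err := dst.Put(ctx, string(kv.Key), string(kv.Value)); err != nil {
				log.Fatal(err)
			}
		}
	}
	if err := <-errc; err != nil {
		log.Fatal(err)
	}

	// Phase 2: follow live updates from the revision where SyncBase stopped.
	for wresp := range s.SyncUpdates(ctx) {
		for _, ev := range wresp.Events {
			var err error
			switch ev.Type {
			case clientv3.EventTypePut:
				_, err = dst.Put(ctx, string(ev.Kv.Key), string(ev.Kv.Value))
			case clientv3.EventTypeDelete:
				_, err = dst.Delete(ctx, string(ev.Kv.Key))
			}
			if err != nil {
				log.Fatal(err)
			}
		}
	}
}
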
diff --git a/etcdctl/ctlv3/command/member_command.go b/etcdctl/ctlv3/command/member_command.go
index 53b624b9881..4756d637c3f 100644
--- a/etcdctl/ctlv3/command/member_command.go
+++ b/etcdctl/ctlv3/command/member_command.go
@@ -20,10 +20,10 @@ import (
"strconv"
"strings"
- "github.com/spf13/cobra"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
)
var (
@@ -35,7 +35,7 @@ var (
func NewMemberCommand() *cobra.Command {
mc := &cobra.Command{
		Use:   "member <subcommand>",
- Short: "Membership related commands",
+ Short: "节点相关的命令",
}
mc.AddCommand(NewMemberAddCommand())
@@ -51,13 +51,13 @@ func NewMemberCommand() *cobra.Command {
func NewMemberAddCommand() *cobra.Command {
cc := &cobra.Command{
		Use:   "add <memberName> [options]",
- Short: "Adds a member into the cluster",
+ Short: "添加一个节点",
Run: memberAddCommandFunc,
}
- cc.Flags().StringVar(&memberPeerURLs, "peer-urls", "", "comma separated peer URLs for the new member.")
- cc.Flags().BoolVar(&isLearner, "learner", false, "indicates if the new member is raft learner")
+ cc.Flags().StringVar(&memberPeerURLs, "peer-urls", "", "用逗号分隔新成员的对等url.")
+ cc.Flags().BoolVar(&isLearner, "learner", false, "表示新成员是否为learner")
return cc
}
@@ -66,7 +66,7 @@ func NewMemberAddCommand() *cobra.Command {
func NewMemberRemoveCommand() *cobra.Command {
cc := &cobra.Command{
		Use:   "remove <memberID>",
- Short: "Removes a member from the cluster",
+ Short: "从集群中移除成员",
Run: memberRemoveCommandFunc,
}
@@ -78,7 +78,7 @@ func NewMemberRemoveCommand() *cobra.Command {
func NewMemberUpdateCommand() *cobra.Command {
cc := &cobra.Command{
		Use:   "update <memberID> [options]",
- Short: "Updates a member in the cluster",
+ Short: "更新节点通信地址",
Run: memberUpdateCommandFunc,
}
@@ -92,12 +92,8 @@ func NewMemberUpdateCommand() *cobra.Command {
func NewMemberListCommand() *cobra.Command {
cc := &cobra.Command{
Use: "list",
- Short: "Lists all members in the cluster",
- Long: `When --write-out is set to simple, this command prints out comma-separated member lists for each endpoint.
-The items in the lists are ID, Status, Name, Peer Addrs, Client Addrs, Is Learner.
-`,
-
- Run: memberListCommandFunc,
+ Short: "显示集群所有成员",
+ Run: memberListCommandFunc,
}
return cc
@@ -107,11 +103,8 @@ The items in the lists are ID, Status, Name, Peer Addrs, Client Addrs, Is Learne
func NewMemberPromoteCommand() *cobra.Command {
cc := &cobra.Command{
		Use:   "promote <memberID>",
- Short: "Promotes a non-voting member in the cluster",
- Long: `Promotes a non-voting learner member to a voting one in the cluster.
-`,
-
- Run: memberPromoteCommandFunc,
+ Short: "提升一个learner节点",
+ Run: memberPromoteCommandFunc,
}
return cc
@@ -158,7 +151,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {
display.MemberAdd(*resp)
if _, ok := (display).(*simplePrinter); ok {
- var conf []string
+ conf := []string{}
for _, memb := range resp.Members {
for _, u := range memb.PeerURLs {
n := memb.Name
@@ -173,7 +166,7 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {
fmt.Printf("ETCD_NAME=%q\n", newMemberName)
fmt.Printf("ETCD_INITIAL_CLUSTER=%q\n", strings.Join(conf, ","))
fmt.Printf("ETCD_INITIAL_ADVERTISE_PEER_URLS=%q\n", memberPeerURLs)
- fmt.Print("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
+ fmt.Printf("ETCD_INITIAL_CLUSTER_STATE=\"existing\"\n")
}
}
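
The member subcommands map directly onto the Cluster API (MemberAdd, MemberAddAsLearner, MemberRemove, MemberUpdate, MemberList, MemberPromote). A short sketch of adding a learner and listing members, assuming the upstream go.etcd.io/etcd/client/v3 import path; the peer URL is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// `member add --learner` maps to MemberAddAsLearner; plain `member add`
	// maps to MemberAdd. Both take the new member's peer URLs.
	resp, err := cli.MemberAddAsLearner(ctx, []string{"http://127.0.0.1:12380"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("added member %x as learner\n", resp.Member.ID)

	// `member list` is a single MemberList call.
	list, err := cli.MemberList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range list.Members {
		fmt.Printf("%x name=%s peers=%v learner=%v\n", m.ID, m.Name, m.PeerURLs, m.IsLearner)
	}
}
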
diff --git a/etcdctl/ctlv3/command/move_leader_command.go b/etcdctl/ctlv3/command/move_leader_command.go
index a7b4f397b1c..f1cc3cebb08 100644
--- a/etcdctl/ctlv3/command/move_leader_command.go
+++ b/etcdctl/ctlv3/command/move_leader_command.go
@@ -18,17 +18,17 @@ import (
"fmt"
"strconv"
- "github.com/spf13/cobra"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
)
// NewMoveLeaderCommand returns the cobra command for "move-leader".
func NewMoveLeaderCommand() *cobra.Command {
cmd := &cobra.Command{
		Use:   "move-leader <transferee-member-id>",
- Short: "Transfers leadership to another etcd cluster member.",
+ Short: "触发leader转移",
Run: transferLeadershipCommandFunc,
}
return cmd
@@ -44,20 +44,20 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
- cfg := clientConfigFromCmd(cmd)
- cli := mustClient(cfg)
- eps := cli.Endpoints()
- cli.Close()
+ c := mustClientFromCmd(cmd)
+ eps := c.Endpoints()
+ c.Close()
ctx, cancel := commandCtx(cmd)
- // find current leader
var leaderCli *clientv3.Client
var leaderID uint64
+ // 找到当前的leader
for _, ep := range eps {
- cfg.Endpoints = []string{ep}
- cli := mustClient(cfg)
- resp, serr := cli.Status(ctx, ep)
+ cfg := clientConfigFromCmd(cmd)
+ cfg.endpoints = []string{ep}
+ cli := cfg.mustClient()
+ resp, serr := cli.Status(ctx, ep) // 获取单个节点状态
if serr != nil {
cobrautl.ExitWithError(cobrautl.ExitError, serr)
}
@@ -69,6 +69,7 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
}
cli.Close()
}
+
if leaderCli == nil {
cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("no leader endpoint given at %v", eps))
}
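
transferLeadershipCommandFunc dials every endpoint to locate the leader because MoveLeader must be served by the current leader. The simplified sketch below assumes the single configured endpoint already is the leader and picks any other voting member as the transferee (upstream go.etcd.io/etcd/client/v3 import path, placeholder endpoint):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Learn the current leader's ID from Status, then choose another voting member.
	status, err := cli.Status(ctx, cli.Endpoints()[0])
	if err != nil {
		log.Fatal(err)
	}
	members, err := cli.MemberList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	var transferee uint64
	for _, m := range members.Members {
		if m.ID != status.Leader && !m.IsLearner {
			transferee = m.ID
			break
		}
	}
	if transferee == 0 {
		log.Fatal("no candidate member found")
	}
	// MoveLeader asks the leader to hand off to the transferee.
	if _, err := cli.MoveLeader(ctx, transferee); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leadership transferred to %x\n", transferee)
}
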
diff --git a/etcdctl/ctlv3/command/over_alarm_command.go b/etcdctl/ctlv3/command/over_alarm_command.go
new file mode 100644
index 00000000000..a825bbe79d4
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_alarm_command.go
@@ -0,0 +1,82 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "fmt"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
+)
+
+// NewAlarmCommand returns the cobra command for "alarm".
+func NewAlarmCommand() *cobra.Command {
+ ac := &cobra.Command{
+		Use:   "alarm <subcommand>",
+ Short: "Alarm related commands",
+ }
+
+ ac.AddCommand(NewAlarmDisarmCommand())
+ ac.AddCommand(NewAlarmListCommand())
+
+ return ac
+}
+
+func NewAlarmDisarmCommand() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "disarm",
+ Short: "解除所有警报",
+ Run: alarmDisarmCommandFunc,
+ }
+ return &cmd
+}
+
+// alarmDisarmCommandFunc executes the "alarm disarm" command.
+func alarmDisarmCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm disarm command accepts no arguments"))
+ }
+ ctx, cancel := commandCtx(cmd)
+ resp, err := mustClientFromCmd(cmd).AlarmDisarm(ctx, &v3.AlarmMember{})
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ display.Alarm(*resp)
+}
+
+func NewAlarmListCommand() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "list",
+ Short: "列出所有警报",
+ Run: alarmListCommandFunc,
+ }
+ return &cmd
+}
+
+// alarmListCommandFunc executes the "alarm list" command.
+func alarmListCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("alarm list command accepts no arguments"))
+ }
+ ctx, cancel := commandCtx(cmd)
+ resp, err := mustClientFromCmd(cmd).AlarmList(ctx)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ display.Alarm(*resp)
+}
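
Both alarm subcommands are one RPC each on the Maintenance API. A minimal sketch, assuming the upstream go.etcd.io/etcd/client/v3 import path; passing an empty AlarmMember to AlarmDisarm clears alarms on all members, which matches what alarmDisarmCommandFunc above sends:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// `alarm list`: report any raised alarms (NOSPACE, CORRUPT, ...).
	alarms, err := cli.AlarmList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range alarms.Alarms {
		fmt.Printf("member %x: %s\n", a.MemberID, a.Alarm)
	}

	// `alarm disarm`: an empty AlarmMember clears alarms cluster-wide.
	if _, err := cli.AlarmDisarm(ctx, &clientv3.AlarmMember{}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("alarms disarmed")
}
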
diff --git a/etcdctl/ctlv3/command/over_compaction_command.go b/etcdctl/ctlv3/command/over_compaction_command.go
new file mode 100644
index 00000000000..37c9d314f1d
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_compaction_command.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License
+
+package command
+
+import (
+ "fmt"
+ "strconv"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
+)
+
+var compactPhysical bool
+
+// NewCompactionCommand returns the cobra command for "compaction".
+func NewCompactionCommand() *cobra.Command {
+ cmd := &cobra.Command{
+		Use:   "compaction [options] <revision>",
+ Short: "压缩etcd中的事件历史记录",
+ Run: compactionCommandFunc,
+ }
+ cmd.Flags().BoolVar(&compactPhysical, "physical", false, "'true' 用于等待压缩从物理上删除所有旧修订")
+ return cmd
+}
+
+// compactionCommandFunc executes the "compaction" command.
+func compactionCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("compaction command needs 1 argument"))
+ }
+
+ rev, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ var opts []clientv3.CompactOption
+ if compactPhysical {
+ opts = append(opts, clientv3.WithCompactPhysical())
+ }
+
+ c := mustClientFromCmd(cmd)
+ ctx, cancel := commandCtx(cmd)
+ _, cerr := c.Compact(ctx, rev, opts...)
+ cancel()
+ if cerr != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, cerr)
+ }
+ fmt.Println("已压缩了修订版本:", rev)
+}
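
compactionCommandFunc parses the revision argument and issues a single Compact call; WithCompactPhysical makes the call block until old revisions are physically removed, matching the --physical flag. A sketch that compacts up to the current revision, assuming the upstream go.etcd.io/etcd/client/v3 import path; the probe key is arbitrary:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Any Range response carries the current store revision in its header.
	resp, err := cli.Get(ctx, "any-key")
	if err != nil {
		log.Fatal(err)
	}
	rev := resp.Header.Revision

	if _, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical()); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compacted revision:", rev)
}
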
diff --git a/etcdctl/ctlv3/command/over_del_command.go b/etcdctl/ctlv3/command/over_del_command.go
new file mode 100644
index 00000000000..a4fcb3be2ea
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_del_command.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "fmt"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
+)
+
+var (
+ delPrefix bool
+ delPrevKV bool
+ delFromKey bool
+)
+
+// NewDelCommand returns the cobra command for "del".
+func NewDelCommand() *cobra.Command {
+ cmd := &cobra.Command{
+		Use:   "del [options] <key> [range_end]",
+ Short: "移除指定的键或键的范围 [key, range_end)",
+ Run: delCommandFunc,
+ }
+
+ cmd.Flags().BoolVar(&delPrefix, "prefix", false, "通过匹配前缀删除键")
+ cmd.Flags().BoolVar(&delPrevKV, "prev-kv", false, "返回删除的k,v 键值对")
+ cmd.Flags().BoolVar(&delFromKey, "from-key", false, "使用字节比较法删除大于或等于给定键的键.")
+ return cmd
+}
+
+// delCommandFunc executes the "del" command.
+func delCommandFunc(cmd *cobra.Command, args []string) {
+ key, opts := getDelOp(args)
+ ctx, cancel := commandCtx(cmd)
+ resp, err := mustClientFromCmd(cmd).Delete(ctx, key, opts...)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ display.Del(*resp)
+}
+
+func getDelOp(args []string) (string, []clientv3.OpOption) {
+ if len(args) == 0 || len(args) > 2 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("del command needs one argument as key and an optional argument as range_end"))
+ }
+
+ if delPrefix && delFromKey {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
+ }
+
+ opts := []clientv3.OpOption{}
+ key := args[0]
+ if len(args) > 1 {
+ if delPrefix || delFromKey {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set"))
+ }
+ opts = append(opts, clientv3.WithRange(args[1]))
+ }
+
+ if delPrefix {
+ if len(key) == 0 {
+ key = "\x00"
+ opts = append(opts, clientv3.WithFromKey())
+ } else {
+ opts = append(opts, clientv3.WithPrefix())
+ }
+ }
+ if delPrevKV {
+ opts = append(opts, clientv3.WithPrevKV())
+ }
+
+ if delFromKey {
+ if len(key) == 0 {
+ key = "\x00"
+ }
+ opts = append(opts, clientv3.WithFromKey())
+ }
+
+ return key, opts
+}
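
The del command builds a single DeleteRange request from its flags. A minimal sketch of the --prefix plus --prev-kv case, assuming the upstream go.etcd.io/etcd/client/v3 import path and a hypothetical prefix "foo":

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// `del foo --prefix --prev-kv`: WithPrevKV returns the removed pairs.
	resp, err := cli.Delete(ctx, "foo", clientv3.WithPrefix(), clientv3.WithPrevKV())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", resp.Deleted)
	for _, kv := range resp.PrevKvs {
		fmt.Printf("removed %s = %s\n", kv.Key, kv.Value)
	}
}
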
diff --git a/etcdctl/ctlv3/command/over_ep_command.go b/etcdctl/ctlv3/command/over_ep_command.go
new file mode 100644
index 00000000000..7465bc2e938
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_ep_command.go
@@ -0,0 +1,280 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/v3rpc/rpctypes"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/ls-2018/etcd_cn/pkg/flags"
+
+ "github.com/spf13/cobra"
+ "go.uber.org/zap"
+)
+
+var (
+ epClusterEndpoints bool
+ epHashKVRev int64
+)
+
+// NewEndpointCommand returns the cobra command for "endpoint".
+func NewEndpointCommand() *cobra.Command {
+ ec := &cobra.Command{
+		Use:   "endpoint <subcommand>",
+ Short: "Endpoint related commands",
+ }
+
+ ec.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list")
+ ec.AddCommand(newEpHealthCommand())
+ ec.AddCommand(newEpStatusCommand())
+ ec.AddCommand(newEpHashKVCommand())
+
+ return ec
+}
+
+func newEpHealthCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "health",
+ Short: "检查端点的健康程度",
+ Run: epHealthCommandFunc,
+ }
+
+ return cmd
+}
+
+func newEpStatusCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "status",
+ Short: "打印出指定端点的状态",
+ Long: ``,
+ Run: epStatusCommandFunc,
+ }
+}
+
+func newEpHashKVCommand() *cobra.Command {
+ hc := &cobra.Command{
+ Use: "hashkv",
+ Short: "输出每个端点的KV历史哈希值",
+ Run: epHashKVCommandFunc,
+ }
+ hc.PersistentFlags().Int64Var(&epHashKVRev, "rev", 0, "maximum revision to hash (default: all revisions)")
+ return hc
+}
+
+type epHealth struct {
+ Ep string `json:"endpoint"`
+ Health bool `json:"health"`
+ Took string `json:"took"`
+ Error string `json:"error,omitempty"`
+}
+
+func epHealthCommandFunc(cmd *cobra.Command, args []string) {
+ lg, err := zap.NewProduction()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ flags.SetPflagsFromEnv(lg, "ETCDCTL", cmd.InheritedFlags())
+ initDisplayFromCmd(cmd)
+
+ sec := secureCfgFromCmd(cmd)
+ dt := dialTimeoutFromCmd(cmd)
+ ka := keepAliveTimeFromCmd(cmd)
+ kat := keepAliveTimeoutFromCmd(cmd)
+ auth := authCfgFromCmd(cmd)
+ var cfgs []*v3.Config
+ for _, ep := range endpointsFromCluster(cmd) {
+ cfg, err := newClientCfg([]string{ep}, dt, ka, kat, sec, auth)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
+ }
+ cfgs = append(cfgs, cfg)
+ }
+
+ var wg sync.WaitGroup
+ hch := make(chan epHealth, len(cfgs))
+ for _, cfg := range cfgs {
+ wg.Add(1)
+ go func(cfg *v3.Config) {
+ defer wg.Done()
+ ep := cfg.Endpoints[0]
+ cfg.Logger = lg.Named("client")
+ cli, err := v3.New(*cfg)
+ if err != nil {
+ hch <- epHealth{Ep: ep, Health: false, Error: err.Error()}
+ return
+ }
+ st := time.Now()
+ // Fetch a key; as long as we can get a response without an error, the endpoint is considered healthy.
+ ctx, cancel := commandCtx(cmd)
+ _, err = cli.Get(ctx, "health")
+ eh := epHealth{Ep: ep, Health: false, Took: time.Since(st).String()}
+ // Permission denied is acceptable here, since the proposal still went through consensus to produce it.
+ if err == nil || err == rpctypes.ErrPermissionDenied {
+ eh.Health = true
+ } else {
+ eh.Error = err.Error()
+ }
+
+ if eh.Health {
+ resp, err := cli.AlarmList(ctx)
+ if err == nil && len(resp.Alarms) > 0 {
+ eh.Health = false
+ eh.Error = "存在警报(s): "
+ for _, v := range resp.Alarms {
+ switch v.Alarm {
+ case etcdserverpb.AlarmType_NOSPACE:
+ eh.Error = eh.Error + "NOSPACE "
+ case etcdserverpb.AlarmType_CORRUPT:
+ eh.Error = eh.Error + "CORRUPT "
+ default:
+ eh.Error = eh.Error + "UNKNOWN "
+ }
+ }
+ } else if err != nil {
+ eh.Health = false
+ eh.Error = "无法获取alarm信息"
+ }
+ }
+ cancel()
+ hch <- eh
+ }(cfg)
+ }
+
+ wg.Wait()
+ close(hch)
+
+ errs := false
+ var healthList []epHealth
+ for h := range hch {
+ healthList = append(healthList, h)
+ if h.Error != "" {
+ errs = true
+ }
+ }
+ display.EndpointHealth(healthList)
+ if errs {
+ cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("unhealthy cluster"))
+ }
+}
+
+type epStatus struct {
+ Ep string `json:"Endpoint"`
+ Resp *v3.StatusResponse `json:"Status"`
+}
+
+func epStatusCommandFunc(cmd *cobra.Command, args []string) {
+ c := mustClientFromCmd(cmd)
+
+ var statusList []epStatus
+ var err error
+ for _, ep := range endpointsFromCluster(cmd) {
+ ctx, cancel := commandCtx(cmd)
+ resp, serr := c.Status(ctx, ep)
+ cancel()
+ if serr != nil {
+ err = serr
+ fmt.Fprintf(os.Stderr, "获取端点状态失败%s (%v)\n", ep, serr)
+ continue
+ }
+ statusList = append(statusList, epStatus{Ep: ep, Resp: resp})
+ }
+
+ display.EndpointStatus(statusList)
+
+ if err != nil {
+ os.Exit(cobrautl.ExitError)
+ }
+}
+
+type epHashKV struct {
+ Ep string `json:"Endpoint"`
+ Resp *v3.HashKVResponse `json:"HashKV"`
+}
+
+func epHashKVCommandFunc(cmd *cobra.Command, args []string) {
+ c := mustClientFromCmd(cmd)
+
+ hashList := []epHashKV{}
+ var err error
+ for _, ep := range endpointsFromCluster(cmd) {
+ ctx, cancel := commandCtx(cmd)
+ resp, serr := c.HashKV(ctx, ep, epHashKVRev)
+ cancel()
+ if serr != nil {
+ err = serr
+ fmt.Fprintf(os.Stderr, "Failed to get the hash of endpoint %s (%v)\n", ep, serr)
+ continue
+ }
+ hashList = append(hashList, epHashKV{Ep: ep, Resp: resp})
+ }
+
+ display.EndpointHashKV(hashList)
+
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+}
+
+func endpointsFromCluster(cmd *cobra.Command) []string {
+ if !epClusterEndpoints {
+ endpoints, err := cmd.Flags().GetStringSlice("endpoints")
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ return endpoints
+ }
+
+ sec := secureCfgFromCmd(cmd)
+ dt := dialTimeoutFromCmd(cmd)
+ ka := keepAliveTimeFromCmd(cmd)
+ kat := keepAliveTimeoutFromCmd(cmd)
+ eps, err := endpointsFromCmd(cmd)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ // exclude auth for not asking needless password (MemberList() doesn't need authentication)
+
+ cfg, err := newClientCfg(eps, dt, ka, kat, sec, nil)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ c, err := v3.New(*cfg)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ ctx, cancel := commandCtx(cmd)
+ defer func() {
+ c.Close()
+ cancel()
+ }()
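+ // Ask one reachable endpoint for the member list and collect every member's advertised client URLs.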
+ membs, err := c.MemberList(ctx)
+ if err != nil {
+ err = fmt.Errorf("failed to fetch endpoints from etcd cluster member list: %v", err)
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ var ret []string
+ for _, m := range membs.Members {
+ ret = append(ret, m.ClientURLs...)
+ }
+ return ret
+}
diff --git a/etcdctl/ctlv3/command/over_get_command.go b/etcdctl/ctlv3/command/over_get_command.go
new file mode 100644
index 00000000000..fa1d3917cd9
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_get_command.go
@@ -0,0 +1,181 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
+)
+
+var (
+ getConsistency string
+ getLimit int64
+ getSortOrder string
+ getSortTarget string
+ getPrefix bool
+ getFromKey bool
+ getRev int64
+ getKeysOnly bool
+ getCountOnly bool
+ printValueOnly bool
+)
+
+func NewGetCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "get [options] [range_end]",
+ Short: "获取键或键的范围",
+ Run: getCommandFunc,
+ }
+
+ cmd.Flags().StringVar(&getConsistency, "consistency", "l", "Linearizable(l) or Serializable(s)")
+ cmd.Flags().StringVar(&getSortOrder, "order", "", "对结果排序; ASCEND or DESCEND (ASCEND by default)")
+ cmd.Flags().StringVar(&getSortTarget, "sort-by", "", "使用那个字段排序; CREATE, KEY, MODIFY, VALUE, or VERSION")
+ cmd.Flags().Int64Var(&getLimit, "limit", 0, "结果的最大数量")
+ cmd.Flags().BoolVar(&getPrefix, "prefix", false, "返回前缀匹配的keys")
+ cmd.Flags().BoolVar(&getFromKey, "from-key", false, "使用byte compare获取 >= 给定键的键")
+ cmd.Flags().Int64Var(&getRev, "rev", 0, "指定修订版本")
+ cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "只获取keys")
+ cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "只获取匹配的数量")
+ cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `仅在使用"simple"输出格式时写入值`)
+ return cmd
+}
+
+func getCommandFunc(cmd *cobra.Command, args []string) {
+ key, opts := getGetOp(args)
+ ctx, cancel := commandCtx(cmd)
+ resp, err := mustClientFromCmd(cmd).Get(ctx, key, opts...)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ if getCountOnly {
+ if _, fields := display.(*fieldsPrinter); !fields {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--count-only is only for `--write-out=fields`"))
+ }
+ }
+
+ if printValueOnly {
+ dp, simple := (display).(*simplePrinter)
+ if !simple {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("print-value-only is only for `--write-out=simple`"))
+ }
+ dp.valueOnly = true
+ }
+ display.Get(*resp)
+}
+
+func getGetOp(args []string) (string, []clientv3.OpOption) {
+ if len(args) == 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("get command needs one argument as key and an optional argument as range_end"))
+ }
+
+ if getPrefix && getFromKey {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
+ }
+
+ if getKeysOnly && getCountOnly {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one"))
+ }
+
+ var opts []clientv3.OpOption
+ fmt.Println("getConsistency", getConsistency)
+ switch getConsistency {
+ case "s":
+ opts = append(opts, clientv3.WithSerializable())
+ case "l":
+ // Linearizable reads are the default, so no option is added.
+ default:
+ cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("未知的 consistency 标志 %q", getConsistency))
+ }
+
+ key := args[0]
+ if len(args) > 1 {
+ if getPrefix || getFromKey {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("too many arguments, only accept one argument when `--prefix` or `--from-key` is set"))
+ }
+ opts = append(opts, clientv3.WithRange(args[1]))
+ }
+
+ opts = append(opts, clientv3.WithLimit(getLimit))
+ if getRev > 0 {
+ opts = append(opts, clientv3.WithRev(getRev))
+ }
+
+ sortByOrder := clientv3.SortNone
+ sortOrder := strings.ToUpper(getSortOrder)
+ switch {
+ case sortOrder == "ASCEND":
+ sortByOrder = clientv3.SortAscend
+ case sortOrder == "DESCEND":
+ sortByOrder = clientv3.SortDescend
+ case sortOrder == "":
+ // nothing
+ default:
+ cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort order %v", getSortOrder))
+ }
+
+ sortByTarget := clientv3.SortByKey
+ sortTarget := strings.ToUpper(getSortTarget)
+ switch {
+ case sortTarget == "CREATE":
+ sortByTarget = clientv3.SortByCreateRevision
+ case sortTarget == "KEY":
+ sortByTarget = clientv3.SortByKey
+ case sortTarget == "MODIFY":
+ sortByTarget = clientv3.SortByModRevision
+ case sortTarget == "VALUE":
+ sortByTarget = clientv3.SortByValue
+ case sortTarget == "VERSION":
+ sortByTarget = clientv3.SortByVersion
+ case sortTarget == "":
+ // nothing
+ default:
+ cobrautl.ExitWithError(cobrautl.ExitBadFeature, fmt.Errorf("bad sort target %v", getSortTarget))
+ }
+
+ opts = append(opts, clientv3.WithSort(sortByTarget, sortByOrder))
+
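+ // An empty key with --prefix means "every key": start the range at "\x00" and use from-key semantics instead of a literal prefix.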
+ if getPrefix {
+ if len(key) == 0 {
+ key = "\x00"
+ opts = append(opts, clientv3.WithFromKey())
+ } else {
+ opts = append(opts, clientv3.WithPrefix())
+ }
+ }
+
+ if getFromKey {
+ if len(key) == 0 {
+ key = "\x00"
+ }
+ opts = append(opts, clientv3.WithFromKey())
+ }
+
+ if getKeysOnly {
+ opts = append(opts, clientv3.WithKeysOnly())
+ }
+
+ if getCountOnly {
+ opts = append(opts, clientv3.WithCountOnly())
+ }
+
+ return key, opts
+}
diff --git a/etcdctl/ctlv3/command/over_lease_command.go b/etcdctl/ctlv3/command/over_lease_command.go
new file mode 100644
index 00000000000..26fad9361e0
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_lease_command.go
@@ -0,0 +1,205 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+
+ "github.com/spf13/cobra"
+)
+
+// NewLeaseCommand returns the cobra command for "lease".
+func NewLeaseCommand() *cobra.Command {
+ lc := &cobra.Command{
+ Use: "lease ",
+ Short: "租约相关命令",
+ }
+
+ lc.AddCommand(NewLeaseGrantCommand())
+ lc.AddCommand(NewLeaseRevokeCommand())
+ lc.AddCommand(NewLeaseTimeToLiveCommand())
+ lc.AddCommand(NewLeaseListCommand())
+ lc.AddCommand(NewLeaseKeepAliveCommand())
+
+ return lc
+}
+
+// NewLeaseGrantCommand returns the cobra command for "lease grant".
+func NewLeaseGrantCommand() *cobra.Command {
+ lc := &cobra.Command{
+ Use: "grant ",
+ Short: "创建租约",
+
+ Run: leaseGrantCommandFunc,
+ }
+
+ return lc
+}
+
+func leaseGrantCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease grant命令需要TTL参数"))
+ }
+
+ ttl, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("错误的ttl (%v)", err))
+ }
+
+ ctx, cancel := commandCtx(cmd)
+ resp, err := mustClientFromCmd(cmd).Grant(ctx, ttl)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("创建租约失败 (%v)", err))
+ }
+ display.Grant(*resp)
+}
+
+// NewLeaseRevokeCommand returns the cobra command for "lease revoke".
+func NewLeaseRevokeCommand() *cobra.Command {
+ lc := &cobra.Command{
+ Use: "revoke ",
+ Short: "移除租约",
+
+ Run: leaseRevokeCommandFunc,
+ }
+
+ return lc
+}
+
+// leaseRevokeCommandFunc executes the "lease grant" command.
+func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease revoke command needs 1 argument"))
+ }
+
+ id := leaseFromArgs(args[0])
+ ctx, cancel := commandCtx(cmd)
+ resp, err := mustClientFromCmd(cmd).Revoke(ctx, id)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to revoke lease (%v)", err))
+ }
+ display.Revoke(id, *resp)
+}
+
+var timeToLiveKeys bool
+
+// NewLeaseTimeToLiveCommand returns the cobra command for "lease timetolive".
+func NewLeaseTimeToLiveCommand() *cobra.Command {
+ lc := &cobra.Command{
+ Use: "timetolive [options]",
+ Short: "获取租约信息",
+
+ Run: leaseTimeToLiveCommandFunc,
+ }
+ lc.Flags().BoolVar(&timeToLiveKeys, "keys", false, "获取租约附加到了哪些key上")
+
+ return lc
+}
+
+// leaseTimeToLiveCommandFunc executes the "lease timetolive" command.
+func leaseTimeToLiveCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease timetolive command needs lease ID as argument"))
+ }
+ var opts []v3.LeaseOption
+ if timeToLiveKeys {
+ opts = append(opts, v3.WithAttachedKeys())
+ }
+ resp, rerr := mustClientFromCmd(cmd).TimeToLive(context.TODO(), leaseFromArgs(args[0]), opts...)
+ if rerr != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr)
+ }
+ display.TimeToLive(*resp, timeToLiveKeys)
+}
+
+// NewLeaseListCommand returns the cobra command for "lease list".
+func NewLeaseListCommand() *cobra.Command {
+ lc := &cobra.Command{
+ Use: "list",
+ Short: "显示所有租约",
+ Run: leaseListCommandFunc,
+ }
+ return lc
+}
+
+// leaseListCommandFunc executes the "lease list" command.
+func leaseListCommandFunc(cmd *cobra.Command, args []string) {
+ resp, rerr := mustClientFromCmd(cmd).Leases(context.TODO())
+ if rerr != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadConnection, rerr)
+ }
+ display.Leases(*resp)
+}
+
+var leaseKeepAliveOnce bool
+
+// NewLeaseKeepAliveCommand returns the cobra command for "lease keep-alive".
+func NewLeaseKeepAliveCommand() *cobra.Command {
+ lc := &cobra.Command{
+ Use: "keep-alive [options] ",
+ Short: "重续租约 [renew]",
+
+ Run: leaseKeepAliveCommandFunc,
+ }
+
+ lc.Flags().BoolVar(&leaseKeepAliveOnce, "once", false, "Resets the keep-alive time to its original value and cobrautl.Exits immediately")
+
+ return lc
+}
+
+// leaseKeepAliveCommandFunc executes the "lease keep-alive" command.
+func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("lease keep-alive命令需要lease ID作为参数"))
+ }
+
+ id := leaseFromArgs(args[0])
+
+ if leaseKeepAliveOnce {
+ respc, kerr := mustClientFromCmd(cmd).KeepAliveOnce(context.TODO(), id)
+ if kerr != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr)
+ }
+ display.KeepAlive(*respc)
+ return
+ }
+
+ respc, kerr := mustClientFromCmd(cmd).KeepAlive(context.TODO(), id)
+ if kerr != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadConnection, kerr)
+ }
+ for resp := range respc {
+ display.KeepAlive(*resp)
+ }
+
+ if _, ok := (display).(*simplePrinter); ok {
+ fmt.Printf("租约 %016x 过期或移除.\n", id)
+ }
+}
+
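+ // leaseFromArgs parses a lease ID given in hexadecimal, as printed by "lease grant".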
+func leaseFromArgs(arg string) v3.LeaseID {
+ id, err := strconv.ParseInt(arg, 16, 64)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID arg (%v), expecting ID in Hex", err))
+ }
+ return v3.LeaseID(id)
+}
diff --git a/etcdctl/ctlv3/command/over_put_command.go b/etcdctl/ctlv3/command/over_put_command.go
new file mode 100644
index 00000000000..be259de5fb7
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_put_command.go
@@ -0,0 +1,101 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
+)
+
+var (
+ leaseStr string
+ putPrevKV bool
+ putIgnoreVal bool
+ putIgnoreLease bool
+)
+
+// NewPutCommand returns the cobra command for "put".
+func NewPutCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "put",
+ Short: "将给定的键放入存储中",
+ Long: `将给定的键放入存储中`,
+ Run: putCommandFunc,
+ }
+ cmd.Flags().StringVar(&leaseStr, "lease", "0", "将租约附加到key (in hexadecimal) ")
+ cmd.Flags().BoolVar(&putPrevKV, "prev-kv", false, "返回键值对之前的版本")
+ cmd.Flags().BoolVar(&putIgnoreVal, "ignore-value", false, "更新当前的值")
+ cmd.Flags().BoolVar(&putIgnoreLease, "ignore-lease", false, "更新租约")
+ return cmd
+}
+
+func putCommandFunc(cmd *cobra.Command, args []string) {
+ key, value, opts := getPutOp(args)
+
+ ctx, cancel := commandCtx(cmd)
+ resp, err := mustClientFromCmd(cmd).Put(ctx, key, value, opts...)
+ cancel()
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ display.Put(*resp)
+}
+
+func getPutOp(args []string) (string, string, []clientv3.OpOption) {
+ if len(args) == 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments"))
+ }
+
+ key := args[0]
+ if putIgnoreVal && len(args) > 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs only 1 argument when 'ignore-value' is set"))
+ }
+
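+ // Unless --ignore-value is set, the value comes from the second argument or, if absent, from stdin.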
+ var value string
+ var err error
+ if !putIgnoreVal {
+ value, err = argOrStdin(args, os.Stdin, 1)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments"))
+ }
+ }
+
+ id, err := strconv.ParseInt(leaseStr, 16, 64)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err))
+ }
+
+ var opts []clientv3.OpOption
+ if id != 0 {
+ opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id)))
+ }
+ if putPrevKV {
+ opts = append(opts, clientv3.WithPrevKV())
+ }
+ if putIgnoreVal {
+ opts = append(opts, clientv3.WithIgnoreValue())
+ }
+ if putIgnoreLease {
+ opts = append(opts, clientv3.WithIgnoreLease())
+ }
+
+ return key, value, opts
+}
diff --git a/etcdctl/ctlv3/command/over_role_command.go b/etcdctl/ctlv3/command/over_role_command.go
new file mode 100644
index 00000000000..0b908822ce4
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_role_command.go
@@ -0,0 +1,241 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "context"
+ "fmt"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
+)
+
+var (
+ rolePermPrefix bool
+ rolePermFromKey bool
+)
+
+// NewRoleCommand returns the cobra command for "role".
+func NewRoleCommand() *cobra.Command {
+ ac := &cobra.Command{
+ Use: "role ",
+ Short: "Role related commands",
+ }
+
+ ac.AddCommand(newRoleAddCommand())
+ ac.AddCommand(newRoleDeleteCommand())
+ ac.AddCommand(newRoleGetCommand())
+ ac.AddCommand(newRoleListCommand())
+ ac.AddCommand(newRoleGrantPermissionCommand())
+ ac.AddCommand(newRoleRevokePermissionCommand())
+
+ return ac
+}
+
+func newRoleAddCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "add ",
+ Short: "添加一个角色",
+ Run: roleAddCommandFunc,
+ }
+}
+
+func newRoleDeleteCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "delete ",
+ Short: "删除一个角色",
+ Run: roleDeleteCommandFunc,
+ }
+}
+
+func newRoleGetCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "get ",
+ Short: "获取一个角色的详细信息",
+ Run: roleGetCommandFunc,
+ }
+}
+
+func newRoleListCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "list",
+ Short: "显示所有角色",
+ Run: roleListCommandFunc,
+ }
+}
+
+func newRoleGrantPermissionCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "grant-permission [options] [endkey]",
+ Short: "给角色授予一个权限",
+ Run: roleGrantPermissionCommandFunc,
+ }
+
+ cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "授予前缀权限")
+ cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "使用byte compare授予大于或等于给定键的权限")
+
+ return cmd
+}
+
+func newRoleRevokePermissionCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "revoke-permission [endkey]",
+ Short: "移除角色权限里的一个key",
+ Run: roleRevokePermissionCommandFunc,
+ }
+
+ cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "取消前缀权限")
+ cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "使用byte compare撤销大于或等于给定键的权限")
+
+ return cmd
+}
+
+func roleAddCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role add命令需要角色名作为参数"))
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.RoleAdd(context.TODO(), args[0])
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.RoleAdd(args[0], *resp)
+}
+
+func roleDeleteCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role delete command requires role name as its argument"))
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.RoleDelete(context.TODO(), args[0])
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.RoleDelete(args[0], *resp)
+}
+
+func roleGetCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role get命令需要角色名作为参数"))
+ }
+
+ name := args[0]
+ resp, err := mustClientFromCmd(cmd).Auth.RoleGet(context.TODO(), name)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.RoleGet(name, *resp)
+}
+
+// roleListCommandFunc executes the "role list" command.
+func roleListCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role list command requires no arguments"))
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.RoleList(context.TODO())
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.RoleList(*resp)
+}
+
+func roleGrantPermissionCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) < 3 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role grant命令需要角色名、权限类型和关键字[endkey]作为参数"))
+ }
+
+ perm, err := clientv3.StrToPermissionType(args[1]) // read write readwrite
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
+ }
+
+ key, rangeEnd := permRange(args[2:])
+ resp, err := mustClientFromCmd(cmd).Auth.RoleGrantPermission(context.TODO(), args[0], key, rangeEnd, perm)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.RoleGrantPermission(args[0], *resp)
+}
+
+func roleRevokePermissionCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) < 2 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role revoke-permission命令需要角色名和关键字[endkey]作为参数"))
+ }
+
+ key, rangeEnd := permRange(args[1:])
+ resp, err := mustClientFromCmd(cmd).Auth.RoleRevokePermission(context.TODO(), args[0], key, rangeEnd)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ display.RoleRevokePermission(args[0], args[1], rangeEnd, *resp)
+}
+
+func permRange(args []string) (string, string) {
+ key := args[0]
+ var rangeEnd string
+ if len(key) == 0 {
+ if rolePermPrefix && rolePermFromKey {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--from-key and --prefix flags 是互相排斥的 "))
+ }
+
+ // Range permission is expressed as adt.BytesAffineInterval,
+ // so the empty prefix which should be matched with every key must be like this ["\x00", ).
+ key = "\x00"
+ if rolePermPrefix || rolePermFromKey {
+ // For the both cases of prefix and from-key, a permission with an empty key
+ // should allow access to the entire key space.
+ // 0x00 will be treated as open ended in etcd side.
+ rangeEnd = "\x00"
+ }
+ } else {
+ var err error
+ rangeEnd, err = rangeEndFromPermFlags(args[0:])
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
+ }
+ }
+ return key, rangeEnd
+}
+
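+ // rangeEndFromPermFlags derives the range end from the optional endkey argument and the --prefix/--from-key flags.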
+func rangeEndFromPermFlags(args []string) (string, error) {
+ if len(args) == 1 {
+ if rolePermPrefix {
+ if rolePermFromKey {
+ return "", fmt.Errorf("--from-key and --prefix flags are mutually exclusive")
+ }
+ return clientv3.GetPrefixRangeEnd(args[0]), nil
+ }
+ if rolePermFromKey {
+ return "\x00", nil
+ }
+ // single key case
+ return "", nil
+ }
+ if rolePermPrefix {
+ return "", fmt.Errorf("unexpected endkey argument with --prefix flag")
+ }
+ if rolePermFromKey {
+ return "", fmt.Errorf("unexpected endkey argument with --from-key flag")
+ }
+ return args[1], nil
+}
diff --git a/etcdctl/ctlv3/command/over_user_command.go b/etcdctl/ctlv3/command/over_user_command.go
new file mode 100644
index 00000000000..23a06bdfa91
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_user_command.go
@@ -0,0 +1,298 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/bgentry/speakeasy"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
+ "github.com/spf13/cobra"
+)
+
+var userShowDetail bool
+
+// NewUserCommand returns the cobra command for "user".
+func NewUserCommand() *cobra.Command {
+ ac := &cobra.Command{
+ Use: "user ",
+ Short: "User related commands",
+ }
+
+ ac.AddCommand(newUserAddCommand())
+ ac.AddCommand(newUserDeleteCommand())
+ ac.AddCommand(newUserGetCommand())
+ ac.AddCommand(newUserListCommand())
+ ac.AddCommand(newUserChangePasswordCommand())
+ ac.AddCommand(newUserGrantRoleCommand())
+ ac.AddCommand(newUserRevokeRoleCommand())
+
+ return ac
+}
+
+var (
+ passwordInteractive bool
+ passwordFromFlag string
+ noPassword bool
+)
+
+func newUserAddCommand() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "add [options]",
+ Short: "添加新用户",
+ Run: userAddCommandFunc,
+ }
+
+ cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, "从stdin读取密码,而不是交互终端")
+ cmd.Flags().StringVar(&passwordFromFlag, "new-user-password", "", "从命令行标志提供密码")
+ cmd.Flags().BoolVar(&noPassword, "no-password", false, "创建一个没有密码的用户(仅基于CN的身份验证)")
+
+ return &cmd
+}
+
+func newUserDeleteCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "delete ",
+ Short: "删除一个用户",
+ Run: userDeleteCommandFunc,
+ }
+}
+
+func newUserGetCommand() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "get [options]",
+ Short: "获取用户详情",
+ Run: userGetCommandFunc,
+ }
+
+ cmd.Flags().BoolVar(&userShowDetail, "detail", false, "显示授予用户的角色的权限")
+
+ return &cmd
+}
+
+func newUserListCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "list",
+ Short: "显示所有用户",
+ Run: userListCommandFunc,
+ }
+}
+
+func newUserChangePasswordCommand() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "passwd [options]",
+ Short: "更改用户密码",
+ Run: userChangePasswordCommandFunc,
+ }
+
+ cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, "如果为true,从stdin读取密码,而不是交互终端")
+
+ return &cmd
+}
+
+func newUserGrantRoleCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "grant-role ",
+ Short: "授予用户权限",
+ Run: userGrantRoleCommandFunc,
+ }
+}
+
+func newUserRevokeRoleCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "revoke-role ",
+ Short: "移除用户权限",
+ Run: userRevokeRoleCommandFunc,
+ }
+}
+
+func userAddCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户add命令需要用户名作为参数"))
+ }
+
+ var password string
+ var user string
+
+ options := &clientv3.UserAddOptions{
+ NoPassword: false,
+ }
+
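+ // Password resolution order: --new-user-password flag, then a "user:password" argument, then stdin or an interactive prompt.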
+ if !noPassword { // a password is required unless --no-password (CN-based authentication only) was given
+ if passwordFromFlag != "" {
+ user = args[0]
+ password = passwordFromFlag
+ } else {
+ splitted := strings.SplitN(args[0], ":", 2)
+ if len(splitted) < 2 {
+ user = args[0]
+ if !passwordInteractive {
+ fmt.Scanf("%s", &password)
+ } else {
+ password = readPasswordInteractive(args[0])
+ }
+ } else {
+ user = splitted[0]
+ password = splitted[1]
+ if len(user) == 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户名不允许为空"))
+ }
+ }
+ }
+ } else {
+ user = args[0]
+ options.NoPassword = true
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.UserAddWithOptions(context.TODO(), user, password, options)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.UserAdd(user, *resp)
+}
+
+// userDeleteCommandFunc executes the "user delete" command.
+func userDeleteCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户删除命令需要用户名作为参数"))
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.UserDelete(context.TODO(), args[0])
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ display.UserDelete(args[0], *resp)
+}
+
+// userGetCommandFunc executes the "user get" command.
+func userGetCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户get命令需要用户名作为参数"))
+ }
+
+ name := args[0]
+ client := mustClientFromCmd(cmd)
+ resp, err := client.Auth.UserGet(context.TODO(), name)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ if userShowDetail {
+ fmt.Printf("User: %s\n", name)
+ for _, role := range resp.Roles {
+ fmt.Printf("\n")
+ roleResp, err := client.Auth.RoleGet(context.TODO(), role)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ display.RoleGet(role, *roleResp)
+ }
+ } else {
+ display.UserGet(name, *resp)
+ }
+}
+
+// userListCommandFunc executes the "user list" command.
+func userListCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user list命令不需要参数"))
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.UserList(context.TODO())
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.UserList(*resp)
+}
+
+// userChangePasswordCommandFunc executes the "user passwd" command.
+func userChangePasswordCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 1 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户passwd命令需要用户名作为参数"))
+ }
+
+ var password string
+
+ if !passwordInteractive {
+ fmt.Scanf("%s", &password)
+ } else {
+ password = readPasswordInteractive(args[0])
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.UserChangePassword(context.TODO(), args[0], password)
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.UserChangePassword(*resp)
+}
+
+// userGrantRoleCommandFunc executes the "user grant-role" command.
+func userGrantRoleCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 2 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user grant命令需要用户名和角色名作为参数"))
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.UserGrantRole(context.TODO(), args[0], args[1])
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.UserGrantRole(args[0], args[1], *resp)
+}
+
+// userRevokeRoleCommandFunc executes the "user revoke-role" command.
+func userRevokeRoleCommandFunc(cmd *cobra.Command, args []string) {
+ if len(args) != 2 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("用户revoke-role需要用户名和角色名作为参数"))
+ }
+
+ resp, err := mustClientFromCmd(cmd).Auth.UserRevokeRole(context.TODO(), args[0], args[1])
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+
+ display.UserRevokeRole(args[0], args[1], *resp)
+}
+
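+ // readPasswordInteractive prompts for the password twice and exits unless both entries match and are non-empty.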
+func readPasswordInteractive(name string) string {
+ prompt1 := fmt.Sprintf("%s密码: ", name)
+ password1, err1 := speakeasy.Ask(prompt1)
+ if err1 != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("确认密码失败: %s", err1))
+ }
+
+ if len(password1) == 0 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("空密码"))
+ }
+
+ prompt2 := fmt.Sprintf("再次输入密码确认%s:", name)
+ password2, err2 := speakeasy.Ask(prompt2)
+ if err2 != nil {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("确认密码失败 %s", err2))
+ }
+
+ if password1 != password2 {
+ cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("提供的密码不一致"))
+ }
+
+ return password1
+}
diff --git a/etcdctl/ctlv3/command/over_version_command.go b/etcdctl/ctlv3/command/over_version_command.go
new file mode 100644
index 00000000000..1c8b28b6eed
--- /dev/null
+++ b/etcdctl/ctlv3/command/over_version_command.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+
+ "github.com/spf13/cobra"
+)
+
+// NewVersionCommand prints out the version of etcd.
+func NewVersionCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "version",
+ Short: "打印编译时的版本",
+ Run: versionCommandFunc,
+ }
+}
+
+func versionCommandFunc(cmd *cobra.Command, args []string) {
+ fmt.Println("etcdctl version:", version.Version)
+ fmt.Println("API version:", version.APIVersion)
+}
diff --git a/etcdctl/ctlv3/command/printer.go b/etcdctl/ctlv3/command/printer.go
index 7cc1b887b48..096c25bd456 100644
--- a/etcdctl/ctlv3/command/printer.go
+++ b/etcdctl/ctlv3/command/printer.go
@@ -19,11 +19,11 @@ import (
"fmt"
"strings"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
"github.com/dustin/go-humanize"
+
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
)
type printer interface {
@@ -32,37 +32,27 @@ type printer interface {
Put(v3.PutResponse)
Txn(v3.TxnResponse)
Watch(v3.WatchResponse)
-
Grant(r v3.LeaseGrantResponse)
Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse)
KeepAlive(r v3.LeaseKeepAliveResponse)
TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool)
Leases(r v3.LeaseLeasesResponse)
-
MemberAdd(v3.MemberAddResponse)
MemberRemove(id uint64, r v3.MemberRemoveResponse)
MemberUpdate(id uint64, r v3.MemberUpdateResponse)
MemberPromote(id uint64, r v3.MemberPromoteResponse)
MemberList(v3.MemberListResponse)
-
EndpointHealth([]epHealth)
EndpointStatus([]epStatus)
EndpointHashKV([]epHashKV)
MoveLeader(leader, target uint64, r v3.MoveLeaderResponse)
-
- DowngradeValidate(r v3.DowngradeResponse)
- DowngradeEnable(r v3.DowngradeResponse)
- DowngradeCancel(r v3.DowngradeResponse)
-
Alarm(v3.AlarmResponse)
-
RoleAdd(role string, r v3.AuthRoleAddResponse)
RoleGet(role string, r v3.AuthRoleGetResponse)
RoleDelete(role string, r v3.AuthRoleDeleteResponse)
RoleList(v3.AuthRoleListResponse)
RoleGrantPermission(role string, r v3.AuthRoleGrantPermissionResponse)
RoleRevokePermission(role string, key string, end string, r v3.AuthRoleRevokePermissionResponse)
-
UserAdd(user string, r v3.AuthUserAddResponse)
UserGet(user string, r v3.AuthUserGetResponse)
UserList(r v3.AuthUserListResponse)
@@ -70,7 +60,6 @@ type printer interface {
UserGrantRole(user string, role string, r v3.AuthUserGrantRoleResponse)
UserRevokeRole(user string, role string, r v3.AuthUserRevokeRoleResponse)
UserDelete(user string, r v3.AuthUserDeleteResponse)
-
AuthStatus(r v3.AuthStatusResponse)
}
@@ -79,7 +68,7 @@ func NewPrinter(printerType string, isHex bool) printer {
case "simple":
return &simplePrinter{isHex: isHex}
case "fields":
- return &fieldsPrinter{printer: newPrinterUnsupported("fields"), isHex: isHex}
+ return &fieldsPrinter{newPrinterUnsupported("fields")}
case "json":
return newJSONPrinter(isHex)
case "protobuf":
@@ -111,22 +100,20 @@ func (p *printerRPC) MemberAdd(r v3.MemberAddResponse) { p.p((*pb.MemberAddRespo
func (p *printerRPC) MemberRemove(id uint64, r v3.MemberRemoveResponse) {
p.p((*pb.MemberRemoveResponse)(&r))
}
+
func (p *printerRPC) MemberUpdate(id uint64, r v3.MemberUpdateResponse) {
p.p((*pb.MemberUpdateResponse)(&r))
}
-func (p *printerRPC) MemberPromote(id uint64, r v3.MemberPromoteResponse) {
- p.p((*pb.MemberPromoteResponse)(&r))
-}
func (p *printerRPC) MemberList(r v3.MemberListResponse) { p.p((*pb.MemberListResponse)(&r)) }
func (p *printerRPC) Alarm(r v3.AlarmResponse) { p.p((*pb.AlarmResponse)(&r)) }
func (p *printerRPC) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) {
p.p((*pb.MoveLeaderResponse)(&r))
}
-func (p *printerRPC) DowngradeValidate(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) }
-func (p *printerRPC) DowngradeEnable(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) }
-func (p *printerRPC) DowngradeCancel(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) }
func (p *printerRPC) RoleAdd(_ string, r v3.AuthRoleAddResponse) { p.p((*pb.AuthRoleAddResponse)(&r)) }
-func (p *printerRPC) RoleGet(_ string, r v3.AuthRoleGetResponse) { p.p((*pb.AuthRoleGetResponse)(&r)) }
+func (p *printerRPC) RoleGet(_ string, r v3.AuthRoleGetResponse) {
+ p.p((*pb.AuthRoleGetResponse)(&r))
+}
+
func (p *printerRPC) RoleDelete(_ string, r v3.AuthRoleDeleteResponse) {
p.p((*pb.AuthRoleDeleteResponse)(&r))
}
@@ -134,6 +121,7 @@ func (p *printerRPC) RoleList(r v3.AuthRoleListResponse) { p.p((*pb.AuthRoleList
func (p *printerRPC) RoleGrantPermission(_ string, r v3.AuthRoleGrantPermissionResponse) {
p.p((*pb.AuthRoleGrantPermissionResponse)(&r))
}
+
func (p *printerRPC) RoleRevokePermission(_ string, _ string, _ string, r v3.AuthRoleRevokePermissionResponse) {
p.p((*pb.AuthRoleRevokePermissionResponse)(&r))
}
@@ -143,15 +131,19 @@ func (p *printerRPC) UserList(r v3.AuthUserListResponse) { p.p((*pb.Auth
func (p *printerRPC) UserChangePassword(r v3.AuthUserChangePasswordResponse) {
p.p((*pb.AuthUserChangePasswordResponse)(&r))
}
+
func (p *printerRPC) UserGrantRole(_ string, _ string, r v3.AuthUserGrantRoleResponse) {
p.p((*pb.AuthUserGrantRoleResponse)(&r))
}
+
func (p *printerRPC) UserRevokeRole(_ string, _ string, r v3.AuthUserRevokeRoleResponse) {
p.p((*pb.AuthUserRevokeRoleResponse)(&r))
}
+
func (p *printerRPC) UserDelete(_ string, r v3.AuthUserDeleteResponse) {
p.p((*pb.AuthUserDeleteResponse)(&r))
}
+
func (p *printerRPC) AuthStatus(r v3.AuthStatusResponse) {
p.p((*pb.AuthStatusResponse)(&r))
}
@@ -170,9 +162,6 @@ func (p *printerUnsupported) EndpointStatus([]epStatus) { p.p(nil) }
func (p *printerUnsupported) EndpointHashKV([]epHashKV) { p.p(nil) }
func (p *printerUnsupported) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { p.p(nil) }
-func (p *printerUnsupported) DowngradeValidate(r v3.DowngradeResponse) { p.p(nil) }
-func (p *printerUnsupported) DowngradeEnable(r v3.DowngradeResponse) { p.p(nil) }
-func (p *printerUnsupported) DowngradeCancel(r v3.DowngradeResponse) { p.p(nil) }
func makeMemberListTable(r v3.MemberListResponse) (hdr []string, rows [][]string) {
hdr = []string{"ID", "Status", "Name", "Peer Addrs", "Client Addrs", "Is Learner"}
@@ -211,16 +200,16 @@ func makeEndpointHealthTable(healthList []epHealth) (hdr []string, rows [][]stri
}
func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]string) {
- hdr = []string{"endpoint", "ID", "version", "storage version", "db size", "db size in use", "is leader", "is learner", "raft term",
- "raft index", "raft applied index", "errors"}
+ hdr = []string{
+ "endpoint", "ID", "version", "db size", "is leader", "is learner", "raft term",
+ "raft index", "raft applied index", "errors",
+ }
for _, status := range statusList {
rows = append(rows, []string{
status.Ep,
fmt.Sprintf("%x", status.Resp.Header.MemberId),
status.Resp.Version,
- status.Resp.StorageVersion,
humanize.Bytes(uint64(status.Resp.DbSize)),
- humanize.Bytes(uint64(status.Resp.DbSizeInUse)),
fmt.Sprint(status.Resp.Leader == status.Resp.Header.MemberId),
fmt.Sprint(status.Resp.IsLearner),
fmt.Sprint(status.Resp.RaftTerm),
@@ -233,12 +222,11 @@ func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]stri
}
func makeEndpointHashKVTable(hashList []epHashKV) (hdr []string, rows [][]string) {
- hdr = []string{"endpoint", "hash", "hash_revision"}
+ hdr = []string{"endpoint", "hash"}
for _, h := range hashList {
rows = append(rows, []string{
h.Ep,
fmt.Sprint(h.Resp.Hash),
- fmt.Sprint(h.Resp.HashRevision),
})
}
return hdr, rows
diff --git a/etcdctl/ctlv3/command/printer_fields.go b/etcdctl/ctlv3/command/printer_fields.go
index 5e7d9258425..095ebaa740e 100644
--- a/etcdctl/ctlv3/command/printer_fields.go
+++ b/etcdctl/ctlv3/command/printer_fields.go
@@ -17,16 +17,12 @@ package command
import (
"fmt"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- spb "go.etcd.io/etcd/api/v3/mvccpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
- v3 "go.etcd.io/etcd/client/v3"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ spb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
)
-type fieldsPrinter struct {
- printer
- isHex bool
-}
+type fieldsPrinter struct{ printer }
func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) {
fmt.Printf("\"%sKey\" : %q\n", pfx, string(kv.Key))
@@ -34,27 +30,13 @@ func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) {
fmt.Printf("\"%sModRevision\" : %d\n", pfx, kv.ModRevision)
fmt.Printf("\"%sVersion\" : %d\n", pfx, kv.Version)
fmt.Printf("\"%sValue\" : %q\n", pfx, string(kv.Value))
- if p.isHex {
- fmt.Printf("\"%sLease\" : %016x\n", pfx, kv.Lease)
- } else {
- fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease)
- }
+ fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease)
}
func (p *fieldsPrinter) hdr(h *pb.ResponseHeader) {
- if p.isHex {
- fmt.Println(`"ClusterID" :`, types.ID(h.ClusterId))
- fmt.Println(`"MemberID" :`, types.ID(h.MemberId))
- } else {
- fmt.Println(`"ClusterID" :`, h.ClusterId)
- fmt.Println(`"MemberID" :`, h.MemberId)
- }
- // Revision only makes sense for k/v responses. For other kinds of
- // responses, i.e. MemberList, usually the revision isn't populated
- // at all; so it would be better to hide this field in these cases.
- if h.Revision > 0 {
- fmt.Println(`"Revision" :`, h.Revision)
- }
+ fmt.Println(`"ClusterID" :`, h.ClusterId)
+ fmt.Println(`"MemberID" :`, h.MemberId)
+ fmt.Println(`"Revision" :`, h.Revision)
fmt.Println(`"RaftTerm" :`, h.RaftTerm)
}
@@ -85,16 +67,18 @@ func (p *fieldsPrinter) Put(r v3.PutResponse) {
func (p *fieldsPrinter) Txn(r v3.TxnResponse) {
p.hdr(r.Header)
fmt.Println(`"Succeeded" :`, r.Succeeded)
- for _, resp := range r.Responses {
- switch v := resp.Response.(type) {
- case *pb.ResponseOp_ResponseDeleteRange:
+ for _, r := range r.Responses {
+ if r.ResponseOp_ResponseDeleteRange != nil {
+ v := r.ResponseOp_ResponseDeleteRange
p.Del((v3.DeleteResponse)(*v.ResponseDeleteRange))
- case *pb.ResponseOp_ResponsePut:
+ } else if r.ResponseOp_ResponsePut != nil {
+ v := r.ResponseOp_ResponsePut
p.Put((v3.PutResponse)(*v.ResponsePut))
- case *pb.ResponseOp_ResponseRange:
+ } else if r.ResponseOp_ResponseRange != nil {
+ v := r.ResponseOp_ResponseRange
p.Get((v3.GetResponse)(*v.ResponseRange))
- default:
- fmt.Printf("\"Unknown\" : %q\n", fmt.Sprintf("%+v", v))
+ } else {
+ fmt.Printf("unexpected response %+v\n", r)
}
}
}
@@ -112,11 +96,7 @@ func (p *fieldsPrinter) Watch(resp v3.WatchResponse) {
func (p *fieldsPrinter) Grant(r v3.LeaseGrantResponse) {
p.hdr(r.ResponseHeader)
- if p.isHex {
- fmt.Printf("\"ID\" : %016x\n", r.ID)
- } else {
- fmt.Println(`"ID" :`, r.ID)
- }
+ fmt.Println(`"ID" :`, r.ID)
fmt.Println(`"TTL" :`, r.TTL)
}
@@ -126,21 +106,13 @@ func (p *fieldsPrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) {
func (p *fieldsPrinter) KeepAlive(r v3.LeaseKeepAliveResponse) {
p.hdr(r.ResponseHeader)
- if p.isHex {
- fmt.Printf("\"ID\" : %016x\n", r.ID)
- } else {
- fmt.Println(`"ID" :`, r.ID)
- }
+ fmt.Println(`"ID" :`, r.ID)
fmt.Println(`"TTL" :`, r.TTL)
}
func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) {
p.hdr(r.ResponseHeader)
- if p.isHex {
- fmt.Printf("\"ID\" : %016x\n", r.ID)
- } else {
- fmt.Println(`"ID" :`, r.ID)
- }
+ fmt.Println(`"ID" :`, r.ID)
fmt.Println(`"TTL" :`, r.TTL)
fmt.Println(`"GrantedTTL" :`, r.GrantedTTL)
for _, k := range r.Keys {
@@ -151,22 +123,14 @@ func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) {
func (p *fieldsPrinter) Leases(r v3.LeaseLeasesResponse) {
p.hdr(r.ResponseHeader)
for _, item := range r.Leases {
- if p.isHex {
- fmt.Printf("\"ID\" : %016x\n", item.ID)
- } else {
- fmt.Println(`"ID" :`, item.ID)
- }
+ fmt.Println(`"ID" :`, item.ID)
}
}
func (p *fieldsPrinter) MemberList(r v3.MemberListResponse) {
p.hdr(r.Header)
for _, m := range r.Members {
- if p.isHex {
- fmt.Println(`"ID" :`, types.ID(m.ID))
- } else {
- fmt.Println(`"ID" :`, m.ID)
- }
+ fmt.Println(`"ID" :`, m.ID)
fmt.Printf("\"Name\" : %q\n", m.Name)
for _, u := range m.PeerURLs {
fmt.Printf("\"PeerURL\" : %q\n", u)
@@ -193,9 +157,7 @@ func (p *fieldsPrinter) EndpointStatus(eps []epStatus) {
for _, ep := range eps {
p.hdr(ep.Resp.Header)
fmt.Printf("\"Version\" : %q\n", ep.Resp.Version)
- fmt.Printf("\"StorageVersion\" : %q\n", ep.Resp.StorageVersion)
fmt.Println(`"DBSize" :`, ep.Resp.DbSize)
- fmt.Println(`"DBSizeInUse" :`, ep.Resp.DbSizeInUse)
fmt.Println(`"Leader" :`, ep.Resp.Leader)
fmt.Println(`"IsLearner" :`, ep.Resp.IsLearner)
fmt.Println(`"RaftIndex" :`, ep.Resp.RaftIndex)
@@ -212,7 +174,6 @@ func (p *fieldsPrinter) EndpointHashKV(hs []epHashKV) {
p.hdr(h.Resp.Header)
fmt.Printf("\"Endpoint\" : %q\n", h.Ep)
fmt.Println(`"Hash" :`, h.Resp.Hash)
- fmt.Println(`"HashRevision" :`, h.Resp.HashRevision)
fmt.Println()
}
}
@@ -220,11 +181,7 @@ func (p *fieldsPrinter) EndpointHashKV(hs []epHashKV) {
func (p *fieldsPrinter) Alarm(r v3.AlarmResponse) {
p.hdr(r.Header)
for _, a := range r.Alarms {
- if p.isHex {
- fmt.Println(`"MemberID" :`, types.ID(a.MemberID))
- } else {
- fmt.Println(`"MemberID" :`, a.MemberID)
- }
+ fmt.Println(`"MemberID" :`, a.MemberID)
fmt.Println(`"AlarmType" :`, a.Alarm)
fmt.Println()
}
@@ -242,15 +199,17 @@ func (p *fieldsPrinter) RoleGet(role string, r v3.AuthRoleGetResponse) {
func (p *fieldsPrinter) RoleDelete(role string, r v3.AuthRoleDeleteResponse) { p.hdr(r.Header) }
func (p *fieldsPrinter) RoleList(r v3.AuthRoleListResponse) {
p.hdr(r.Header)
- fmt.Print(`"Roles" :`)
+ fmt.Printf(`"Roles" :`)
for _, r := range r.Roles {
fmt.Printf(" %q", r)
}
fmt.Println()
}
+
func (p *fieldsPrinter) RoleGrantPermission(role string, r v3.AuthRoleGrantPermissionResponse) {
p.hdr(r.Header)
}
+
func (p *fieldsPrinter) RoleRevokePermission(role string, key string, end string, r v3.AuthRoleRevokePermissionResponse) {
p.hdr(r.Header)
}
@@ -259,6 +218,7 @@ func (p *fieldsPrinter) UserChangePassword(r v3.AuthUserChangePasswordResponse)
func (p *fieldsPrinter) UserGrantRole(user string, role string, r v3.AuthUserGrantRoleResponse) {
p.hdr(r.Header)
}
+
func (p *fieldsPrinter) UserRevokeRole(user string, role string, r v3.AuthUserRevokeRoleResponse) {
p.hdr(r.Header)
}
diff --git a/etcdctl/ctlv3/command/printer_json.go b/etcdctl/ctlv3/command/printer_json.go
index c97fc69876a..896435549a3 100644
--- a/etcdctl/ctlv3/command/printer_json.go
+++ b/etcdctl/ctlv3/command/printer_json.go
@@ -21,7 +21,7 @@ import (
"os"
"strconv"
- clientv3 "go.etcd.io/etcd/client/v3"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
)
type jsonPrinter struct {
@@ -37,7 +37,9 @@ func newJSONPrinter(isHex bool) printer {
}
func (p *jsonPrinter) EndpointHealth(r []epHealth) { printJSON(r) }
-func (p *jsonPrinter) EndpointStatus(r []epStatus) { printJSON(r) }
+func (p *jsonPrinter) EndpointStatus(r []epStatus) {
+ printJSON(r)
+}
func (p *jsonPrinter) EndpointHashKV(r []epHashKV) { printJSON(r) }
func (p *jsonPrinter) MemberList(r clientv3.MemberListResponse) {
@@ -67,7 +69,7 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) {
b = strconv.AppendUint(nil, r.Header.MemberId, 16)
buffer.Write(b)
buffer.WriteString("\",\"raft_term\":")
- b = strconv.AppendUint(nil, r.Header.RaftTerm, 10)
+ b = strconv.AppendUint(nil, r.Header.RaftTerm, 16)
buffer.Write(b)
buffer.WriteByte('}')
for i := 0; i < len(r.Members); i++ {
@@ -84,7 +86,7 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) {
return
}
buffer.Write(b)
- buffer.WriteString(",\"clientURLs\":")
+ buffer.WriteString(",\"clientURLS\":")
b, err = json.Marshal(r.Members[i].ClientURLs)
if err != nil {
return
@@ -97,5 +99,4 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) {
}
buffer.WriteString("}")
fmt.Println(buffer.String())
-
}
diff --git a/etcdctl/ctlv3/command/printer_protobuf.go b/etcdctl/ctlv3/command/printer_protobuf.go
index da1da9f3441..25fa0ea186f 100644
--- a/etcdctl/ctlv3/command/printer_protobuf.go
+++ b/etcdctl/ctlv3/command/printer_protobuf.go
@@ -18,10 +18,10 @@ import (
"fmt"
"os"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
- v3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ mvccpb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
)
type pbPrinter struct{ printer }
diff --git a/etcdctl/ctlv3/command/printer_simple.go b/etcdctl/ctlv3/command/printer_simple.go
index 80f3bc9b92a..c992807c6ae 100644
--- a/etcdctl/ctlv3/command/printer_simple.go
+++ b/etcdctl/ctlv3/command/printer_simple.go
@@ -19,13 +19,10 @@ import (
"os"
"strings"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/types"
- v3 "go.etcd.io/etcd/client/v3"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
)
-const rootRole = "root"
-
type simplePrinter struct {
isHex bool
valueOnly bool
@@ -60,14 +57,16 @@ func (s *simplePrinter) Txn(resp v3.TxnResponse) {
for _, r := range resp.Responses {
fmt.Println("")
- switch v := r.Response.(type) {
- case *pb.ResponseOp_ResponseDeleteRange:
+ if r.ResponseOp_ResponseDeleteRange != nil {
+ v := r.ResponseOp_ResponseDeleteRange
s.Del((v3.DeleteResponse)(*v.ResponseDeleteRange))
- case *pb.ResponseOp_ResponsePut:
+ } else if r.ResponseOp_ResponsePut != nil {
+ v := r.ResponseOp_ResponsePut
s.Put((v3.PutResponse)(*v.ResponsePut))
- case *pb.ResponseOp_ResponseRange:
- s.Get(((v3.GetResponse)(*v.ResponseRange)))
- default:
+ } else if r.ResponseOp_ResponseRange != nil {
+ v := r.ResponseOp_ResponseRange
+ s.Get((v3.GetResponse)(*v.ResponseRange))
+ } else {
fmt.Printf("unexpected response %+v\n", r)
}
}
@@ -109,7 +108,7 @@ func (s *simplePrinter) TimeToLive(resp v3.LeaseTimeToLiveResponse, keys bool) {
}
txt += fmt.Sprintf(", attached keys(%v)", ks)
}
- fmt.Println(txt)
+ fmt.Println("TimeToLive--->", txt)
}
func (s *simplePrinter) Leases(resp v3.LeaseLeasesResponse) {
@@ -126,11 +125,7 @@ func (s *simplePrinter) Alarm(resp v3.AlarmResponse) {
}
func (s *simplePrinter) MemberAdd(r v3.MemberAddResponse) {
- asLearner := " "
- if r.Member.IsLearner {
- asLearner = " as learner "
- }
- fmt.Printf("Member %16x added%sto cluster %16x\n", r.Member.ID, asLearner, r.Header.ClusterId)
+ fmt.Printf("Member %16x added to cluster %16x\n", r.Member.ID, r.Header.ClusterId)
}
func (s *simplePrinter) MemberRemove(id uint64, r v3.MemberRemoveResponse) {
@@ -155,9 +150,9 @@ func (s *simplePrinter) MemberList(resp v3.MemberListResponse) {
func (s *simplePrinter) EndpointHealth(hs []epHealth) {
for _, h := range hs {
if h.Error == "" {
- fmt.Printf("%s is healthy: successfully committed proposal: took = %v\n", h.Ep, h.Took)
+ fmt.Printf("%s 健康:propose 成功: took = %v\n", h.Ep, h.Took)
} else {
- fmt.Fprintf(os.Stderr, "%s is unhealthy: failed to commit proposal: %v\n", h.Ep, h.Error)
+ fmt.Fprintf(os.Stderr, "%s 不健康:propose 失败: %v\n", h.Ep, h.Error)
}
}
}
@@ -180,60 +175,42 @@ func (s *simplePrinter) MoveLeader(leader, target uint64, r v3.MoveLeaderRespons
fmt.Printf("Leadership transferred from %s to %s\n", types.ID(leader), types.ID(target))
}
-func (s *simplePrinter) DowngradeValidate(r v3.DowngradeResponse) {
- fmt.Printf("Downgrade validate success, cluster version %s\n", r.Version)
-}
-func (s *simplePrinter) DowngradeEnable(r v3.DowngradeResponse) {
- fmt.Printf("Downgrade enable success, cluster version %s\n", r.Version)
-}
-func (s *simplePrinter) DowngradeCancel(r v3.DowngradeResponse) {
- fmt.Printf("Downgrade cancel success, cluster version %s\n", r.Version)
-}
-
func (s *simplePrinter) RoleAdd(role string, r v3.AuthRoleAddResponse) {
- fmt.Printf("Role %s created\n", role)
+ fmt.Printf("角色 %s 已创建\n", role)
}
func (s *simplePrinter) RoleGet(role string, r v3.AuthRoleGetResponse) {
fmt.Printf("Role %s\n", role)
- if rootRole == role && r.Perm == nil {
- fmt.Println("KV Read:")
- fmt.Println("\t[, ")
- fmt.Println("KV Write:")
- fmt.Println("\t[, ")
- return
- }
-
- fmt.Println("KV Read:")
+ fmt.Println("---->KV Read:")
printRange := func(perm *v3.Permission) {
- sKey := string(perm.Key)
- sRangeEnd := string(perm.RangeEnd)
+ sKey := perm.Key
+ sRangeEnd := perm.RangeEnd
if sRangeEnd != "\x00" {
fmt.Printf("\t[%s, %s)", sKey, sRangeEnd)
} else {
fmt.Printf("\t[%s, ", sKey)
}
- if v3.GetPrefixRangeEnd(sKey) == sRangeEnd && len(sKey) > 0 {
+ if v3.GetPrefixRangeEnd(sKey) == sRangeEnd {
fmt.Printf(" (prefix %s)", sKey)
}
- fmt.Print("\n")
+ fmt.Printf("\n")
}
for _, perm := range r.Perm {
if perm.PermType == v3.PermRead || perm.PermType == v3.PermReadWrite {
if len(perm.RangeEnd) == 0 {
- fmt.Printf("\t%s\n", string(perm.Key))
+ fmt.Printf("\t%s\n", perm.Key)
} else {
printRange((*v3.Permission)(perm))
}
}
}
- fmt.Println("KV Write:")
+ fmt.Println("---->KV Write:")
for _, perm := range r.Perm {
if perm.PermType == v3.PermWrite || perm.PermType == v3.PermReadWrite {
if len(perm.RangeEnd) == 0 {
- fmt.Printf("\t%s\n", string(perm.Key))
+ fmt.Printf("\t%s\n", perm.Key)
} else {
printRange((*v3.Permission)(perm))
}
@@ -248,11 +225,11 @@ func (s *simplePrinter) RoleList(r v3.AuthRoleListResponse) {
}
func (s *simplePrinter) RoleDelete(role string, r v3.AuthRoleDeleteResponse) {
- fmt.Printf("Role %s deleted\n", role)
+ fmt.Printf("角色 %s 删除了\n", role)
}
func (s *simplePrinter) RoleGrantPermission(role string, r v3.AuthRoleGrantPermissionResponse) {
- fmt.Printf("Role %s updated\n", role)
+ fmt.Printf("角色 %s 已更新\n", role)
}
func (s *simplePrinter) RoleRevokePermission(role string, key string, end string, r v3.AuthRoleRevokePermissionResponse) {
@@ -273,27 +250,27 @@ func (s *simplePrinter) UserAdd(name string, r v3.AuthUserAddResponse) {
func (s *simplePrinter) UserGet(name string, r v3.AuthUserGetResponse) {
fmt.Printf("User: %s\n", name)
- fmt.Print("Roles:")
+ fmt.Printf("Roles:")
for _, role := range r.Roles {
fmt.Printf(" %s", role)
}
- fmt.Print("\n")
+ fmt.Printf("\n")
}
func (s *simplePrinter) UserChangePassword(v3.AuthUserChangePasswordResponse) {
- fmt.Println("Password updated")
+ fmt.Println("密码已更新")
}
func (s *simplePrinter) UserGrantRole(user string, role string, r v3.AuthUserGrantRoleResponse) {
- fmt.Printf("Role %s is granted to user %s\n", role, user)
+ fmt.Printf("角色 %s 授予了用户 %s\n", role, user)
}
func (s *simplePrinter) UserRevokeRole(user string, role string, r v3.AuthUserRevokeRoleResponse) {
- fmt.Printf("Role %s is revoked from user %s\n", role, user)
+ fmt.Printf("用户%s移除了角色 %s \n", user, role)
}
func (s *simplePrinter) UserDelete(user string, r v3.AuthUserDeleteResponse) {
- fmt.Printf("User %s deleted\n", user)
+ fmt.Printf("用户 %s 已删除\n", user)
}
func (s *simplePrinter) UserList(r v3.AuthUserListResponse) {
@@ -303,6 +280,6 @@ func (s *simplePrinter) UserList(r v3.AuthUserListResponse) {
}
func (s *simplePrinter) AuthStatus(r v3.AuthStatusResponse) {
- fmt.Println("Authentication Status:", r.Enabled)
- fmt.Println("AuthRevision:", r.AuthRevision)
+ fmt.Println("身份认证是否开启:", r.Enabled)
+ fmt.Println("验证版本:", r.AuthRevision)
}
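The `(prefix %s)` annotation in the RoleGet printer above is emitted when the stored range end equals GetPrefixRangeEnd(key). A minimal stdlib-only sketch of that convention (the prefix with its last non-0xff byte incremented), kept independent of the fork's client_sdk package:

```go
package main

import "fmt"

// prefixRangeEnd mirrors the usual clientv3 convention: the range end for a
// prefix is the prefix with its last byte below 0xff incremented, so
// [key, end) covers every key starting with the prefix.
func prefixRangeEnd(prefix string) string {
	end := []byte(prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return string(end[:i+1])
		}
	}
	return "\x00" // all bytes are 0xff: open-ended range
}

func main() {
	key, rangeEnd := "foo", "fop"
	if prefixRangeEnd(key) == rangeEnd {
		fmt.Printf("\t[%s, %s) (prefix %s)\n", key, rangeEnd, key) // same shape as the printer output
	}
}
```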
diff --git a/etcdctl/ctlv3/command/printer_table.go b/etcdctl/ctlv3/command/printer_table.go
index 2bc6cfcf603..80fee9f3d79 100644
--- a/etcdctl/ctlv3/command/printer_table.go
+++ b/etcdctl/ctlv3/command/printer_table.go
@@ -17,7 +17,7 @@ package command
import (
"os"
- v3 "go.etcd.io/etcd/client/v3"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
"github.com/olekukonko/tablewriter"
)
@@ -34,6 +34,7 @@ func (tp *tablePrinter) MemberList(r v3.MemberListResponse) {
table.SetAlignment(tablewriter.ALIGN_RIGHT)
table.Render()
}
+
func (tp *tablePrinter) EndpointHealth(r []epHealth) {
hdr, rows := makeEndpointHealthTable(r)
table := tablewriter.NewWriter(os.Stdout)
@@ -44,6 +45,7 @@ func (tp *tablePrinter) EndpointHealth(r []epHealth) {
table.SetAlignment(tablewriter.ALIGN_RIGHT)
table.Render()
}
+
func (tp *tablePrinter) EndpointStatus(r []epStatus) {
hdr, rows := makeEndpointStatusTable(r)
table := tablewriter.NewWriter(os.Stdout)
@@ -54,6 +56,7 @@ func (tp *tablePrinter) EndpointStatus(r []epStatus) {
table.SetAlignment(tablewriter.ALIGN_RIGHT)
table.Render()
}
+
func (tp *tablePrinter) EndpointHashKV(r []epHashKV) {
hdr, rows := makeEndpointHashKVTable(r)
table := tablewriter.NewWriter(os.Stdout)
diff --git a/etcdctl/ctlv3/command/put_command.go b/etcdctl/ctlv3/command/put_command.go
deleted file mode 100644
index 1d207bb8388..00000000000
--- a/etcdctl/ctlv3/command/put_command.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
- "os"
- "strconv"
-
- "github.com/spf13/cobra"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-var (
- leaseStr string
- putPrevKV bool
- putIgnoreVal bool
- putIgnoreLease bool
-)
-
-// NewPutCommand returns the cobra command for "put".
-func NewPutCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "put [options] ( can also be given from stdin)",
- Short: "Puts the given key into the store",
- Long: `
-Puts the given key into the store.
-
-When <value> begins with '-', <value> is interpreted as a flag.
-Insert '--' for workaround:
-
-$ put <key> -- <value>
-$ put -- <key> <value>
-
-If <value> isn't given as a command line argument and '--ignore-value' is not specified,
-this command tries to read the value from standard input.
-
-If <lease> isn't given as a command line argument and '--ignore-lease' is not specified,
-this command tries to read the value from standard input.
-
-For example,
-$ cat file | put <key>
-will store the content of the file to <key>.
-`,
- Run: putCommandFunc,
- }
- cmd.Flags().StringVar(&leaseStr, "lease", "0", "lease ID (in hexadecimal) to attach to the key")
- cmd.Flags().BoolVar(&putPrevKV, "prev-kv", false, "return the previous key-value pair before modification")
- cmd.Flags().BoolVar(&putIgnoreVal, "ignore-value", false, "updates the key using its current value")
- cmd.Flags().BoolVar(&putIgnoreLease, "ignore-lease", false, "updates the key using its current lease")
- return cmd
-}
-
-// putCommandFunc executes the "put" command.
-func putCommandFunc(cmd *cobra.Command, args []string) {
- key, value, opts := getPutOp(args)
-
- ctx, cancel := commandCtx(cmd)
- resp, err := mustClientFromCmd(cmd).Put(ctx, key, value, opts...)
- cancel()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- display.Put(*resp)
-}
-
-func getPutOp(args []string) (string, string, []clientv3.OpOption) {
- if len(args) == 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments"))
- }
-
- key := args[0]
- if putIgnoreVal && len(args) > 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs only 1 argument when 'ignore-value' is set"))
- }
-
- var value string
- var err error
- if !putIgnoreVal {
- value, err = argOrStdin(args, os.Stdin, 1)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("put command needs 1 argument and input from stdin or 2 arguments"))
- }
- }
-
- id, err := strconv.ParseInt(leaseStr, 16, 64)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err))
- }
-
- var opts []clientv3.OpOption
- if id != 0 {
- opts = append(opts, clientv3.WithLease(clientv3.LeaseID(id)))
- }
- if putPrevKV {
- opts = append(opts, clientv3.WithPrevKV())
- }
- if putIgnoreVal {
- opts = append(opts, clientv3.WithIgnoreValue())
- }
- if putIgnoreLease {
- opts = append(opts, clientv3.WithIgnoreLease())
- }
-
- return key, value, opts
-}
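The deleted getPutOp parses the --lease flag as a hexadecimal ID before turning it into a clientv3.WithLease option. A small stdlib-only sketch of that parsing step; the lease ID used here is made up:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseLeaseID mirrors the deleted getPutOp logic: --lease is a hexadecimal
// lease ID, and 0 means no lease is attached to the key.
func parseLeaseID(leaseStr string) (int64, error) {
	id, err := strconv.ParseInt(leaseStr, 16, 64)
	if err != nil {
		return 0, fmt.Errorf("bad lease ID (%v), expecting ID in Hex", err)
	}
	return id, nil
}

func main() {
	id, err := parseLeaseID("694d77aa9e38260b") // hypothetical lease ID
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("lease id: %d (hex %x)\n", id, id)
}
```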
diff --git a/etcdctl/ctlv3/command/role_command.go b/etcdctl/ctlv3/command/role_command.go
deleted file mode 100644
index 705d88d886f..00000000000
--- a/etcdctl/ctlv3/command/role_command.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "context"
- "fmt"
-
- "github.com/spf13/cobra"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-var (
- rolePermPrefix bool
- rolePermFromKey bool
-)
-
-// NewRoleCommand returns the cobra command for "role".
-func NewRoleCommand() *cobra.Command {
- ac := &cobra.Command{
- Use: "role ",
- Short: "Role related commands",
- }
-
- ac.AddCommand(newRoleAddCommand())
- ac.AddCommand(newRoleDeleteCommand())
- ac.AddCommand(newRoleGetCommand())
- ac.AddCommand(newRoleListCommand())
- ac.AddCommand(newRoleGrantPermissionCommand())
- ac.AddCommand(newRoleRevokePermissionCommand())
-
- return ac
-}
-
-func newRoleAddCommand() *cobra.Command {
- return &cobra.Command{
- Use: "add ",
- Short: "Adds a new role",
- Run: roleAddCommandFunc,
- }
-}
-
-func newRoleDeleteCommand() *cobra.Command {
- return &cobra.Command{
- Use: "delete ",
- Short: "Deletes a role",
- Run: roleDeleteCommandFunc,
- }
-}
-
-func newRoleGetCommand() *cobra.Command {
- return &cobra.Command{
- Use: "get ",
- Short: "Gets detailed information of a role",
- Run: roleGetCommandFunc,
- }
-}
-
-func newRoleListCommand() *cobra.Command {
- return &cobra.Command{
- Use: "list",
- Short: "Lists all roles",
- Run: roleListCommandFunc,
- }
-}
-
-func newRoleGrantPermissionCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "grant-permission [options] [endkey]",
- Short: "Grants a key to a role",
- Run: roleGrantPermissionCommandFunc,
- }
-
- cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "grant a prefix permission")
- cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "grant a permission of keys that are greater than or equal to the given key using byte compare")
-
- return cmd
-}
-
-func newRoleRevokePermissionCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "revoke-permission [endkey]",
- Short: "Revokes a key from a role",
- Run: roleRevokePermissionCommandFunc,
- }
-
- cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "revoke a prefix permission")
- cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "revoke a permission of keys that are greater than or equal to the given key using byte compare")
-
- return cmd
-}
-
-// roleAddCommandFunc executes the "role add" command.
-func roleAddCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role add command requires role name as its argument"))
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.RoleAdd(context.TODO(), args[0])
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.RoleAdd(args[0], *resp)
-}
-
-// roleDeleteCommandFunc executes the "role delete" command.
-func roleDeleteCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role delete command requires role name as its argument"))
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.RoleDelete(context.TODO(), args[0])
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.RoleDelete(args[0], *resp)
-}
-
-// roleGetCommandFunc executes the "role get" command.
-func roleGetCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role get command requires role name as its argument"))
- }
-
- name := args[0]
- resp, err := mustClientFromCmd(cmd).Auth.RoleGet(context.TODO(), name)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.RoleGet(name, *resp)
-}
-
-// roleListCommandFunc executes the "role list" command.
-func roleListCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role list command requires no arguments"))
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.RoleList(context.TODO())
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.RoleList(*resp)
-}
-
-// roleGrantPermissionCommandFunc executes the "role grant-permission" command.
-func roleGrantPermissionCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) < 3 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role grant command requires role name, permission type, and key [endkey] as its argument"))
- }
-
- perm, err := clientv3.StrToPermissionType(args[1])
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
- }
-
- key, rangeEnd := permRange(args[2:])
- resp, err := mustClientFromCmd(cmd).Auth.RoleGrantPermission(context.TODO(), args[0], key, rangeEnd, perm)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.RoleGrantPermission(args[0], *resp)
-}
-
-// roleRevokePermissionCommandFunc executes the "role revoke-permission" command.
-func roleRevokePermissionCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) < 2 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("role revoke-permission command requires role name and key [endkey] as its argument"))
- }
-
- key, rangeEnd := permRange(args[1:])
- resp, err := mustClientFromCmd(cmd).Auth.RoleRevokePermission(context.TODO(), args[0], key, rangeEnd)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- display.RoleRevokePermission(args[0], args[1], rangeEnd, *resp)
-}
-
-func permRange(args []string) (string, string) {
- key := args[0]
- var rangeEnd string
- if len(key) == 0 {
- if rolePermPrefix && rolePermFromKey {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("--from-key and --prefix flags are mutually exclusive"))
- }
-
- // Range permission is expressed as adt.BytesAffineInterval,
- // so the empty prefix which should be matched with every key must be like this ["\x00", ).
- key = "\x00"
- if rolePermPrefix || rolePermFromKey {
- // For the both cases of prefix and from-key, a permission with an empty key
- // should allow access to the entire key space.
- // 0x00 will be treated as open ended in server side.
- rangeEnd = "\x00"
- }
- } else {
- var err error
- rangeEnd, err = rangeEndFromPermFlags(args[0:])
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
- }
- }
- return key, rangeEnd
-}
-
-func rangeEndFromPermFlags(args []string) (string, error) {
- if len(args) == 1 {
- if rolePermPrefix {
- if rolePermFromKey {
- return "", fmt.Errorf("--from-key and --prefix flags are mutually exclusive")
- }
- return clientv3.GetPrefixRangeEnd(args[0]), nil
- }
- if rolePermFromKey {
- return "\x00", nil
- }
- // single key case
- return "", nil
- }
- if rolePermPrefix {
- return "", fmt.Errorf("unexpected endkey argument with --prefix flag")
- }
- if rolePermFromKey {
- return "", fmt.Errorf("unexpected endkey argument with --from-key flag")
- }
- return args[1], nil
-}
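A condensed stdlib-only sketch of the key/range-end rules that the deleted permRange and rangeEndFromPermFlags implement; the prefix range end is simplified to incrementing the last byte, whereas the real helper also handles trailing 0xff bytes:

```go
package main

import (
	"errors"
	"fmt"
)

// rangeForGrant condenses permRange/rangeEndFromPermFlags: given a key and the
// --prefix / --from-key flags it returns the [key, rangeEnd) pair that would
// be passed to RoleGrantPermission.
func rangeForGrant(key string, prefix, fromKey bool) (string, string, error) {
	if prefix && fromKey {
		return "", "", errors.New("--from-key and --prefix flags are mutually exclusive")
	}
	if key == "" {
		// an empty key with either flag means the whole keyspace: ["\x00", "\x00")
		key = "\x00"
		if prefix || fromKey {
			return key, "\x00", nil
		}
		return key, "", nil
	}
	if fromKey {
		return key, "\x00", nil // every key >= key
	}
	if prefix {
		end := []byte(key)
		end[len(end)-1]++ // simplified; trailing 0xff bytes are not handled here
		return key, string(end), nil
	}
	return key, "", nil // single key
}

func main() {
	k, end, _ := rangeForGrant("foo", true, false)
	fmt.Printf("grant on [%q, %q)\n", k, end)
}
```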
diff --git a/etcdctl/ctlv3/command/snapshot_command.go b/etcdctl/ctlv3/command/snapshot_command.go
index df317e23cc7..5d9cc98d26e 100644
--- a/etcdctl/ctlv3/command/snapshot_command.go
+++ b/etcdctl/ctlv3/command/snapshot_command.go
@@ -17,13 +17,28 @@ package command
import (
"context"
"fmt"
+ "os"
+ snapshot "github.com/ls-2018/etcd_cn/client_sdk/v3/snapshot"
+ "github.com/ls-2018/etcd_cn/etcdutl/etcdutl"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
"go.uber.org/zap"
+)
+
+const (
+ defaultName = "default"
+ defaultInitialAdvertisePeerURLs = "http://localhost:2380"
+)
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- snapshot "go.etcd.io/etcd/client/v3/snapshot"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+var (
+ restoreCluster string
+ restoreClusterToken string
+ restoreDataDir string
+ restoreWalDir string
+ restorePeerURLs string
+ restoreName string
+ skipHashCheck bool
)
// NewSnapshotCommand returns the cobra command for "snapshot".
@@ -33,24 +48,56 @@ func NewSnapshotCommand() *cobra.Command {
Short: "Manages etcd node snapshots",
}
cmd.AddCommand(NewSnapshotSaveCommand())
+ cmd.AddCommand(NewSnapshotRestoreCommand())
+ cmd.AddCommand(newSnapshotStatusCommand())
return cmd
}
func NewSnapshotSaveCommand() *cobra.Command {
return &cobra.Command{
Use: "save ",
- Short: "Stores an etcd node backend snapshot to a given file",
+ Short: "将etcd节点后端快照存储到给定的文件",
Run: snapshotSaveCommandFunc,
}
}
+func newSnapshotStatusCommand() *cobra.Command {
+ return &cobra.Command{
+ Use: "status ",
+ Short: "[deprecated] 从给定的文件获取快照状态",
+ Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint.
+The items in the lists are hash, revision, total keys, total size.
+
+Moved to 'etcdutl snapshot status ...'
+`,
+ Run: snapshotStatusCommandFunc,
+ }
+}
+
+func NewSnapshotRestoreCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "restore [options]",
+ Short: "将etcd成员快照恢复到etcd目录",
+ Run: snapshotRestoreCommandFunc,
+ }
+ cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "数据目录")
+ cmd.Flags().StringVar(&restoreWalDir, "wal-dir", "", "wal目录 (use --data-dir if none given)")
+ cmd.Flags().StringVar(&restoreCluster, "initial-cluster", initialClusterFromName(defaultName), "初始集群配置")
+ cmd.Flags().StringVar(&restoreClusterToken, "initial-cluster-token", "etcd-cluster", "在恢复引导过程中etcd集群的初始群集令牌")
+ cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "要通告给集群其他部分的该成员的对等url列表")
+ cmd.Flags().StringVar(&restoreName, "name", defaultName, "此成员的人类可读的名称")
+ cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "忽略快照完整性哈希值(从数据目录复制时需要)")
+
+ return cmd
+}
+
func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) {
if len(args) != 1 {
err := fmt.Errorf("snapshot save expects one argument")
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
}
- lg, err := logutil.CreateDefaultZapLogger(zap.InfoLevel)
+ lg, err := zap.NewProduction()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
@@ -64,12 +111,26 @@ func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) {
defer cancel()
path := args[0]
- version, err := snapshot.SaveWithVersion(ctx, lg, *cfg, path)
- if err != nil {
+ if err := snapshot.Save(ctx, lg, *cfg, path); err != nil {
cobrautl.ExitWithError(cobrautl.ExitInterrupted, err)
}
fmt.Printf("Snapshot saved at %s\n", path)
- if version != "" {
- fmt.Printf("Server version %s\n", version)
+}
+
+func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
+ fmt.Fprintf(os.Stderr, "Deprecated: Use `etcdutl snapshot status` instead.\n\n")
+ etcdutl.SnapshotStatusCommandFunc(cmd, args)
+}
+
+func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
+ fmt.Fprintf(os.Stderr, "弃用: 使用 `etcdutl snapshot restore` \n\n")
+ etcdutl.SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir, restorePeerURLs, restoreName, skipHashCheck, args)
+}
+
+func initialClusterFromName(name string) string {
+ n := name
+ if name == "" {
+ n = defaultName
}
+ return fmt.Sprintf("%s=http://localhost:2380", n)
}
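initialClusterFromName above only produces the single-member default used by the --initial-cluster flag. A trivial sketch of the value it yields:

```go
package main

import "fmt"

// initialCluster mirrors initialClusterFromName above: with the default --name,
// the restored member advertises itself as "default=http://localhost:2380".
func initialCluster(name string) string {
	if name == "" {
		name = "default"
	}
	return fmt.Sprintf("%s=http://localhost:2380", name)
}

func main() {
	fmt.Println(initialCluster(""))       // default=http://localhost:2380
	fmt.Println(initialCluster("infra1")) // infra1=http://localhost:2380
}
```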
diff --git a/etcdctl/ctlv3/command/txn_command.go b/etcdctl/ctlv3/command/txn_command.go
index b05a7ea6614..2483ec6e9cb 100644
--- a/etcdctl/ctlv3/command/txn_command.go
+++ b/etcdctl/ctlv3/command/txn_command.go
@@ -22,9 +22,10 @@ import (
"strconv"
"strings"
- pb "go.etcd.io/etcd/api/v3/etcdserverpb"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ pb "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
)
@@ -34,8 +35,8 @@ var txnInteractive bool
// NewTxnCommand returns the cobra command for "txn".
func NewTxnCommand() *cobra.Command {
cmd := &cobra.Command{
- Use: "txn [options]",
- Short: "Txn processes all the requests in one transaction",
+ Use: "txn [options] ",
+ Short: "在一个事务里处理所有请求 c(\"a\") = \"22222\"",
Run: txnCommandFunc,
}
cmd.Flags().BoolVarP(&txnInteractive, "interactive", "i", false, "Input transaction in interactive mode")
@@ -68,7 +69,7 @@ func txnCommandFunc(cmd *cobra.Command, args []string) {
func promptInteractive(s string) {
if txnInteractive {
- fmt.Println(s)
+ fmt.Println("promptInteractive--->", s)
}
}
@@ -85,7 +86,7 @@ func readCompares(r *bufio.Reader) (cmps []clientv3.Cmp) {
break
}
- cmp, err := ParseCompare(line)
+ cmp, err := parseCompare(line)
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitInvalidInput, err)
}
@@ -119,7 +120,7 @@ func readOps(r *bufio.Reader) (ops []clientv3.Op) {
}
func parseRequestUnion(line string) (*clientv3.Op, error) {
- args := Argify(line)
+ args := argify(line)
if len(args) < 2 {
return nil, fmt.Errorf("invalid txn compare request: %s", line)
}
@@ -153,7 +154,7 @@ func parseRequestUnion(line string) (*clientv3.Op, error) {
return &op, nil
}
-func ParseCompare(line string) (*clientv3.Cmp, error) {
+func parseCompare(line string) (*clientv3.Cmp, error) {
var (
key string
op string
@@ -164,7 +165,7 @@ func ParseCompare(line string) (*clientv3.Cmp, error) {
if len(lparenSplit) != 2 {
return nil, fmt.Errorf("malformed comparison: %s", line)
}
-
+ // c("a") = "22222"
target := lparenSplit[0]
n, serr := fmt.Sscanf(lparenSplit[1], "%q) %s %q", &key, &op, &val)
if n != 3 {
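parseCompare splits a compare line such as the c("a") = "22222" example noted in the comment above at the first '(' and scans the remainder with Sscanf. A stdlib-only sketch of that scanning step:

```go
package main

import "fmt"

// parseCompareRest is a trimmed-down sketch of parseCompare above: the text
// after the first '(' is pulled apart into key, operator and value.
func parseCompareRest(rest string) (key, op, val string, err error) {
	n, serr := fmt.Sscanf(rest, "%q) %s %q", &key, &op, &val)
	if n != 3 || serr != nil {
		return "", "", "", fmt.Errorf("malformed comparison: %q", rest)
	}
	return key, op, val, nil
}

func main() {
	key, op, val, err := parseCompareRest(`"a") = "22222"`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("key=%s op=%s value=%s\n", key, op, val)
}
```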
diff --git a/etcdctl/ctlv3/command/user_command.go b/etcdctl/ctlv3/command/user_command.go
deleted file mode 100644
index ee3f55a2a47..00000000000
--- a/etcdctl/ctlv3/command/user_command.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/bgentry/speakeasy"
- "github.com/spf13/cobra"
-
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-)
-
-var (
- userShowDetail bool
-)
-
-// NewUserCommand returns the cobra command for "user".
-func NewUserCommand() *cobra.Command {
- ac := &cobra.Command{
- Use: "user ",
- Short: "User related commands",
- }
-
- ac.AddCommand(newUserAddCommand())
- ac.AddCommand(newUserDeleteCommand())
- ac.AddCommand(newUserGetCommand())
- ac.AddCommand(newUserListCommand())
- ac.AddCommand(newUserChangePasswordCommand())
- ac.AddCommand(newUserGrantRoleCommand())
- ac.AddCommand(newUserRevokeRoleCommand())
-
- return ac
-}
-
-var (
- passwordInteractive bool
- passwordFromFlag string
- noPassword bool
-)
-
-func newUserAddCommand() *cobra.Command {
- cmd := cobra.Command{
- Use: "add [options]",
- Short: "Adds a new user",
- Run: userAddCommandFunc,
- }
-
- cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, "Read password from stdin instead of interactive terminal")
- cmd.Flags().StringVar(&passwordFromFlag, "new-user-password", "", "Supply password from the command line flag")
- cmd.Flags().BoolVar(&noPassword, "no-password", false, "Create a user without password (CN based auth only)")
-
- return &cmd
-}
-
-func newUserDeleteCommand() *cobra.Command {
- return &cobra.Command{
- Use: "delete ",
- Short: "Deletes a user",
- Run: userDeleteCommandFunc,
- }
-}
-
-func newUserGetCommand() *cobra.Command {
- cmd := cobra.Command{
- Use: "get [options]",
- Short: "Gets detailed information of a user",
- Run: userGetCommandFunc,
- }
-
- cmd.Flags().BoolVar(&userShowDetail, "detail", false, "Show permissions of roles granted to the user")
-
- return &cmd
-}
-
-func newUserListCommand() *cobra.Command {
- return &cobra.Command{
- Use: "list",
- Short: "Lists all users",
- Run: userListCommandFunc,
- }
-}
-
-func newUserChangePasswordCommand() *cobra.Command {
- cmd := cobra.Command{
- Use: "passwd [options]",
- Short: "Changes password of user",
- Run: userChangePasswordCommandFunc,
- }
-
- cmd.Flags().BoolVar(&passwordInteractive, "interactive", true, "If true, read password from stdin instead of interactive terminal")
-
- return &cmd
-}
-
-func newUserGrantRoleCommand() *cobra.Command {
- return &cobra.Command{
- Use: "grant-role ",
- Short: "Grants a role to a user",
- Run: userGrantRoleCommandFunc,
- }
-}
-
-func newUserRevokeRoleCommand() *cobra.Command {
- return &cobra.Command{
- Use: "revoke-role ",
- Short: "Revokes a role from a user",
- Run: userRevokeRoleCommandFunc,
- }
-}
-
-// userAddCommandFunc executes the "user add" command.
-func userAddCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user add command requires user name as its argument"))
- }
-
- var password string
- var user string
-
- options := &clientv3.UserAddOptions{
- NoPassword: false,
- }
-
- if !noPassword {
- if passwordFromFlag != "" {
- user = args[0]
- password = passwordFromFlag
- } else {
- splitted := strings.SplitN(args[0], ":", 2)
- if len(splitted) < 2 {
- user = args[0]
- if !passwordInteractive {
- fmt.Scanf("%s", &password)
- } else {
- password = readPasswordInteractive(args[0])
- }
- } else {
- user = splitted[0]
- password = splitted[1]
- if len(user) == 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("empty user name is not allowed"))
- }
- }
- }
- } else {
- user = args[0]
- options.NoPassword = true
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.UserAddWithOptions(context.TODO(), user, password, options)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.UserAdd(user, *resp)
-}
-
-// userDeleteCommandFunc executes the "user delete" command.
-func userDeleteCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user delete command requires user name as its argument"))
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.UserDelete(context.TODO(), args[0])
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- display.UserDelete(args[0], *resp)
-}
-
-// userGetCommandFunc executes the "user get" command.
-func userGetCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user get command requires user name as its argument"))
- }
-
- name := args[0]
- client := mustClientFromCmd(cmd)
- resp, err := client.Auth.UserGet(context.TODO(), name)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- if userShowDetail {
- fmt.Printf("User: %s\n", name)
- for _, role := range resp.Roles {
- fmt.Print("\n")
- roleResp, err := client.Auth.RoleGet(context.TODO(), role)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- display.RoleGet(role, *roleResp)
- }
- } else {
- display.UserGet(name, *resp)
- }
-}
-
-// userListCommandFunc executes the "user list" command.
-func userListCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user list command requires no arguments"))
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.UserList(context.TODO())
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.UserList(*resp)
-}
-
-// userChangePasswordCommandFunc executes the "user passwd" command.
-func userChangePasswordCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 1 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user passwd command requires user name as its argument"))
- }
-
- var password string
-
- if !passwordInteractive {
- fmt.Scanf("%s", &password)
- } else {
- password = readPasswordInteractive(args[0])
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.UserChangePassword(context.TODO(), args[0], password)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.UserChangePassword(*resp)
-}
-
-// userGrantRoleCommandFunc executes the "user grant-role" command.
-func userGrantRoleCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 2 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user grant command requires user name and role name as its argument"))
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.UserGrantRole(context.TODO(), args[0], args[1])
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.UserGrantRole(args[0], args[1], *resp)
-}
-
-// userRevokeRoleCommandFunc executes the "user revoke-role" command.
-func userRevokeRoleCommandFunc(cmd *cobra.Command, args []string) {
- if len(args) != 2 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("user revoke-role requires user name and role name as its argument"))
- }
-
- resp, err := mustClientFromCmd(cmd).Auth.UserRevokeRole(context.TODO(), args[0], args[1])
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
-
- display.UserRevokeRole(args[0], args[1], *resp)
-}
-
-func readPasswordInteractive(name string) string {
- prompt1 := fmt.Sprintf("Password of %s: ", name)
- password1, err1 := speakeasy.Ask(prompt1)
- if err1 != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %s", err1))
- }
-
- if len(password1) == 0 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("empty password"))
- }
-
- prompt2 := fmt.Sprintf("Type password of %s again for confirmation: ", name)
- password2, err2 := speakeasy.Ask(prompt2)
- if err2 != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("failed to ask password: %s", err2))
- }
-
- if password1 != password2 {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("given passwords are different"))
- }
-
- return password1
-}
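The deleted userAddCommandFunc accepts either a bare user name or the name:password form. A small sketch of that argument split; interactive and stdin password reading are omitted:

```go
package main

import (
	"fmt"
	"strings"
)

// splitUserPassword mirrors how the deleted userAddCommandFunc handles the
// "name:password" argument form; without a colon, the password would instead
// be read interactively or from stdin.
func splitUserPassword(arg string) (user, password string, inline bool) {
	parts := strings.SplitN(arg, ":", 2)
	if len(parts) < 2 {
		return arg, "", false
	}
	return parts[0], parts[1], true
}

func main() {
	u, p, inline := splitUserPassword("root:secret")
	fmt.Println(u, p, inline) // root secret true
	u, _, inline = splitUserPassword("alice")
	fmt.Println(u, inline) // alice false
}
```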
diff --git a/etcdctl/ctlv3/command/util.go b/etcdctl/ctlv3/command/util.go
index 8338ef33dcd..c24b9d11ec0 100644
--- a/etcdctl/ctlv3/command/util.go
+++ b/etcdctl/ctlv3/command/util.go
@@ -19,16 +19,16 @@ import (
"crypto/tls"
"encoding/hex"
"fmt"
- "io"
+ "io/ioutil"
"net/http"
"regexp"
"strconv"
"strings"
"time"
- pb "go.etcd.io/etcd/api/v3/mvccpb"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ v3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+ pb "github.com/ls-2018/etcd_cn/offical/api/v3/mvccpb"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
)
@@ -36,13 +36,13 @@ import (
func printKV(isHex bool, valueOnly bool, kv *pb.KeyValue) {
k, v := string(kv.Key), string(kv.Value)
if isHex {
- k = addHexPrefix(hex.EncodeToString(kv.Key))
- v = addHexPrefix(hex.EncodeToString(kv.Value))
+ k = addHexPrefix(hex.EncodeToString([]byte(kv.Key)))
+ v = addHexPrefix(hex.EncodeToString([]byte(kv.Value)))
}
if !valueOnly {
- fmt.Println(k)
+ fmt.Println("printKV--->", k)
}
- fmt.Println(v)
+ fmt.Println("printKV--->", v)
}
func addHexPrefix(s string) string {
@@ -56,7 +56,7 @@ func addHexPrefix(s string) string {
return string(ns)
}
-func Argify(s string) []string {
+func argify(s string) []string {
r := regexp.MustCompile(`"(?:[^"\\]|\\.)*"|'[^']*'|[^'"\s]\S*[^'"\s]?`)
args := r.FindAllString(s, -1)
for i := range args {
@@ -76,14 +76,6 @@ func Argify(s string) []string {
return args
}
-func commandCtx(cmd *cobra.Command) (context.Context, context.CancelFunc) {
- timeOut, err := cmd.Flags().GetDuration("command-timeout")
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- return context.WithTimeout(context.Background(), timeOut)
-}
-
func isCommandTimeoutFlagSet(cmd *cobra.Command) bool {
commandTimeoutFlag := cmd.Flags().Lookup("command-timeout")
if commandTimeoutFlag == nil {
@@ -92,8 +84,8 @@ func isCommandTimeoutFlagSet(cmd *cobra.Command) bool {
return commandTimeoutFlag.Changed
}
-// get the process_resident_memory_bytes from /metrics
-func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 {
+// get the process_resident_memory_bytes from /metrics
+func endpointMemoryMetrics(host string, scfg *secureCfg) float64 {
residentMemoryKey := "process_resident_memory_bytes"
var residentMemoryValue string
if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") {
@@ -102,14 +94,14 @@ func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 {
url := host + "/metrics"
if strings.HasPrefix(host, "https://") {
// load client certificate
- cert, err := tls.LoadX509KeyPair(scfg.Cert, scfg.Key)
+ cert, err := tls.LoadX509KeyPair(scfg.cert, scfg.key)
if err != nil {
fmt.Println(fmt.Sprintf("client certificate error: %v", err))
return 0.0
}
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
- InsecureSkipVerify: scfg.InsecureSkipVerify,
+ InsecureSkipVerify: scfg.insecureSkipVerify,
}
}
resp, err := http.Get(url)
@@ -117,7 +109,7 @@ func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 {
fmt.Println(fmt.Sprintf("fetch error: %v", err))
return 0.0
}
- byts, readerr := io.ReadAll(resp.Body)
+ byts, readerr := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if readerr != nil {
fmt.Println(fmt.Sprintf("fetch error: reading %s: %v", url, readerr))
@@ -144,10 +136,10 @@ func endpointMemoryMetrics(host string, scfg *clientv3.SecureConfig) float64 {
}
// compact keyspace history to a provided revision
-func compact(c *clientv3.Client, rev int64) {
+func compact(c *v3.Client, rev int64) {
fmt.Printf("Compacting with revision %d\n", rev)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- _, err := c.Compact(ctx, rev, clientv3.WithCompactPhysical())
+ _, err := c.Compact(ctx, rev, v3.WithCompactPhysical())
cancel()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
@@ -155,14 +147,22 @@ func compact(c *clientv3.Client, rev int64) {
fmt.Printf("Compacted with revision %d\n", rev)
}
-// defrag a given endpoint
-func defrag(c *clientv3.Client, ep string) {
- fmt.Printf("Defragmenting %q\n", ep)
+func defrag(c *v3.Client, ep string) {
+ fmt.Printf("开始内存碎片整理 %q\n", ep)
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
_, err := c.Defragment(ctx, ep)
cancel()
if err != nil {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
- fmt.Printf("Defragmented %q\n", ep)
+ fmt.Printf("内存碎片整理 %q\n", ep)
+}
+
+// commandCtx returns a context bounded by the --command-timeout flag (5s by default).
+func commandCtx(cmd *cobra.Command) (context.Context, context.CancelFunc) {
+ timeOut, err := cmd.Flags().GetDuration("command-timeout")
+ if err != nil {
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
+ }
+ return context.WithTimeout(context.Background(), timeOut)
}
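endpointMemoryMetrics above scrapes /metrics and extracts the process_resident_memory_bytes sample. A stdlib-only sketch of that text-format parse, using a made-up payload:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// residentMemory sketches the parsing endpointMemoryMetrics performs on the
// /metrics payload: find the process_resident_memory_bytes sample line and
// return its value in bytes.
func residentMemory(metrics string) (float64, bool) {
	for _, line := range strings.Split(metrics, "\n") {
		if strings.HasPrefix(line, "process_resident_memory_bytes ") {
			fields := strings.Fields(line)
			if len(fields) != 2 {
				return 0, false
			}
			v, err := strconv.ParseFloat(fields[1], 64)
			return v, err == nil
		}
	}
	return 0, false
}

func main() {
	payload := "# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.6272512e+07\n"
	if v, ok := residentMemory(payload); ok {
		fmt.Printf("%.0f bytes\n", v)
	}
}
```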
diff --git a/etcdctl/ctlv3/command/version_command.go b/etcdctl/ctlv3/command/version_command.go
deleted file mode 100644
index b65c299048b..00000000000
--- a/etcdctl/ctlv3/command/version_command.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "fmt"
-
- "go.etcd.io/etcd/api/v3/version"
-
- "github.com/spf13/cobra"
-)
-
-// NewVersionCommand prints out the version of etcd.
-func NewVersionCommand() *cobra.Command {
- return &cobra.Command{
- Use: "version",
- Short: "Prints the version of etcdctl",
- Run: versionCommandFunc,
- }
-}
-
-func versionCommandFunc(cmd *cobra.Command, args []string) {
- fmt.Println("etcdctl version:", version.Version)
- fmt.Println("API version:", version.APIVersion)
-}
diff --git a/etcdctl/ctlv3/command/watch_command.go b/etcdctl/ctlv3/command/watch_command.go
index d8592cb4e77..2c2b84ea23d 100644
--- a/etcdctl/ctlv3/command/watch_command.go
+++ b/etcdctl/ctlv3/command/watch_command.go
@@ -23,8 +23,9 @@ import (
"os/exec"
"strings"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ clientv3 "github.com/ls-2018/etcd_cn/client_sdk/v3"
+
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
)
@@ -33,7 +34,7 @@ var (
errBadArgsNum = errors.New("bad number of arguments")
errBadArgsNumConflictEnv = errors.New("bad number of arguments (found conflicting environment key)")
errBadArgsNumSeparator = errors.New("bad number of arguments (found separator --, but no commands)")
- errBadArgsInteractiveWatch = errors.New("args[0] must be 'watch' for interactive calls")
+ errBadArgsInteractiveWatch = errors.New("args[0]必须是'watch' for interactive calls")
)
var (
@@ -44,24 +45,21 @@ var (
progressNotify bool
)
-// NewWatchCommand returns the cobra command for "watch".
func NewWatchCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "watch [options] [key or prefix] [range_end] [--] [exec-command arg1 arg2 ...]",
- Short: "Watches events stream on keys or prefixes",
+ Short: "监听键或前缀上的事件流",
Run: watchCommandFunc,
}
- cmd.Flags().BoolVarP(&watchInteractive, "interactive", "i", false, "Interactive mode")
- cmd.Flags().BoolVar(&watchPrefix, "prefix", false, "Watch on a prefix if prefix is set")
- cmd.Flags().Int64Var(&watchRev, "rev", 0, "Revision to start watching")
- cmd.Flags().BoolVar(&watchPrevKey, "prev-kv", false, "get the previous key-value pair before the event happens")
- cmd.Flags().BoolVar(&progressNotify, "progress-notify", false, "get periodic watch progress notification from server")
-
+ cmd.Flags().BoolVarP(&watchInteractive, "interactive", "i", false, "交互模式")
+ cmd.Flags().BoolVar(&watchPrefix, "prefix", false, "是否监听前缀")
+ cmd.Flags().Int64Var(&watchRev, "rev", 0, "从那个修订版本开始监听")
+ cmd.Flags().BoolVar(&watchPrevKey, "prev-kv", false, "获取事件发生之前的键值对")
+ cmd.Flags().BoolVar(&progressNotify, "progress-notify", false, "从etcd获取定期的监听进度通知")
return cmd
}
-// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
envKey, envRange := os.Getenv("ETCDCTL_WATCH_KEY"), os.Getenv("ETCDCTL_WATCH_RANGE_END")
if envKey == "" && envRange != "" {
@@ -88,7 +86,7 @@ func watchCommandFunc(cmd *cobra.Command, args []string) {
if err = c.Close(); err != nil {
cobrautl.ExitWithError(cobrautl.ExitBadConnection, err)
}
- cobrautl.ExitWithError(cobrautl.ExitInterrupted, fmt.Errorf("watch is canceled by the server"))
+ cobrautl.ExitWithError(cobrautl.ExitInterrupted, fmt.Errorf("etcd取消了监听"))
}
func watchInteractiveFunc(cmd *cobra.Command, osArgs []string, envKey, envRange string) {
@@ -103,7 +101,7 @@ func watchInteractiveFunc(cmd *cobra.Command, osArgs []string, envKey, envRange
}
l = strings.TrimSuffix(l, "\n")
- args := Argify(l)
+ args := argify(l)
if len(args) < 1 {
fmt.Fprintf(os.Stderr, "Invalid command: %s (watch and progress supported)\n", l)
continue
@@ -166,10 +164,10 @@ func getWatchChan(c *clientv3.Client, args []string) (clientv3.WatchChan, error)
func printWatchCh(c *clientv3.Client, ch clientv3.WatchChan, execArgs []string) {
for resp := range ch {
if resp.Canceled {
- fmt.Fprintf(os.Stderr, "watch was canceled (%v)\n", resp.Err())
+ fmt.Fprintf(os.Stderr, "监听取消了 (%v)\n", resp.Err())
}
if resp.IsProgressNotify() {
- fmt.Fprintf(os.Stdout, "progress notify: %d\n", resp.Header.Revision)
+ fmt.Fprintf(os.Stdout, "进程通知: %d\n", resp.Header.Revision)
}
display.Watch(resp)
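getWatchChan maps the watch flags onto client options before calling Watch. A sketch of that option assembly, shown with the upstream go.etcd.io/etcd/client/v3 import path; the fork vendors the same API under client_sdk/v3:

```go
package main

import (
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// watchOpts assembles the clientv3 options the watch command derives from its
// --prefix / --rev / --prev-kv / --progress-notify flags.
func watchOpts(prefix bool, rev int64, prevKV, progressNotify bool) []clientv3.OpOption {
	var opts []clientv3.OpOption
	if prefix {
		opts = append(opts, clientv3.WithPrefix())
	}
	if rev != 0 {
		opts = append(opts, clientv3.WithRev(rev))
	}
	if prevKV {
		opts = append(opts, clientv3.WithPrevKV())
	}
	if progressNotify {
		opts = append(opts, clientv3.WithProgressNotify())
	}
	return opts
}

func main() {
	fmt.Printf("%d options assembled\n", len(watchOpts(true, 7, true, false)))
}
```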
diff --git a/etcdctl/ctlv3/command/watch_command_test.go b/etcdctl/ctlv3/command/watch_command_test.go
deleted file mode 100644
index 2292deadcbb..00000000000
--- a/etcdctl/ctlv3/command/watch_command_test.go
+++ /dev/null
@@ -1,558 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package command
-
-import (
- "reflect"
- "testing"
-)
-
-func Test_parseWatchArgs(t *testing.T) {
- tt := []struct {
- osArgs []string // raw arguments to "watch" command
- commandArgs []string // arguments after "spf13/cobra" preprocessing
- envKey, envRange string
- interactive bool
-
- interactiveWatchPrefix bool
- interactiveWatchRev int64
- interactiveWatchPrevKey bool
-
- watchArgs []string
- execArgs []string
- err error
- }{
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar"},
- commandArgs: []string{"foo", "bar"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--"},
- commandArgs: []string{"foo", "bar"},
- interactive: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNumSeparator,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch"},
- commandArgs: nil,
- envKey: "foo",
- envRange: "bar",
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo"},
- commandArgs: []string{"foo"},
- envKey: "foo",
- envRange: "",
- interactive: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNumConflictEnv,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar"},
- commandArgs: []string{"foo", "bar"},
- envKey: "foo",
- envRange: "",
- interactive: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNumConflictEnv,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar"},
- commandArgs: []string{"foo", "bar"},
- envKey: "foo",
- envRange: "bar",
- interactive: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNumConflictEnv,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo"},
- commandArgs: []string{"foo"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch"},
- commandArgs: nil,
- envKey: "foo",
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo"},
- commandArgs: []string{"foo"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo"},
- commandArgs: []string{"foo"},
- envKey: "foo",
- interactive: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNumConflictEnv,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1"},
- commandArgs: nil,
- envKey: "foo",
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1"},
- commandArgs: []string{"foo"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "--", "echo", "watch", "event", "received"},
- commandArgs: []string{"foo", "echo", "watch", "event", "received"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "watch", "event", "received"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "--", "echo", "watch", "event", "received"},
- commandArgs: []string{"foo", "echo", "watch", "event", "received"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "watch", "event", "received"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo", "--", "echo", "watch", "event", "received"},
- commandArgs: []string{"foo", "echo", "watch", "event", "received"},
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "watch", "event", "received"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "bar", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo", "bar", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "bar", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "bar", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "bar", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--rev", "1", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "bar", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--rev", "1", "--", "echo", "watch", "event", "received"},
- commandArgs: []string{"foo", "bar", "echo", "watch", "event", "received"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "watch", "event", "received"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "--rev", "1", "bar", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "bar", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "foo", "bar", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "bar", "echo", "Hello", "World"},
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "--", "echo", "Hello", "World"},
- commandArgs: []string{"echo", "Hello", "World"},
- envKey: "foo",
- envRange: "",
- interactive: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "--rev", "1", "--", "echo", "Hello", "World"},
- commandArgs: []string{"echo", "Hello", "World"},
- envKey: "foo",
- envRange: "bar",
- interactive: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "foo", "bar", "--rev", "1", "--", "echo", "Hello", "World"},
- commandArgs: []string{"foo", "bar", "echo", "Hello", "World"},
- envKey: "foo",
- interactive: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNumConflictEnv,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"foo", "bar", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsInteractiveWatch,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "bar"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch"},
- envKey: "foo",
- envRange: "bar",
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch"},
- envKey: "hello world!",
- envRange: "bar",
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"hello world!", "bar"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "--rev", "1"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "--rev", "1", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "1", "foo", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "5", "--prev-kv", "foo", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 5,
- interactiveWatchPrevKey: true,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "1"},
- envKey: "foo",
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "1"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNum,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "1", "--prefix"},
- envKey: "foo",
- interactive: true,
- interactiveWatchPrefix: true,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "100", "--prefix", "--prev-kv"},
- envKey: "foo",
- interactive: true,
- interactiveWatchPrefix: true,
- interactiveWatchRev: 100,
- interactiveWatchPrevKey: true,
- watchArgs: []string{"foo"},
- execArgs: nil,
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "1", "--prefix"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: nil,
- execArgs: nil,
- err: errBadArgsNum,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--", "echo", "Hello", "World"},
- envKey: "foo",
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--", "echo", "Hello", "World"},
- envKey: "foo",
- envRange: "bar",
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "bar", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 0,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "1", "foo", "bar", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "--rev", "1", "--", "echo", "Hello", "World"},
- envKey: "foo",
- envRange: "bar",
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "--rev", "1", "bar", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "bar", "--rev", "1", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: false,
- interactiveWatchRev: 1,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "bar", "--rev", "7", "--prefix", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: true,
- interactiveWatchRev: 7,
- interactiveWatchPrevKey: false,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- {
- osArgs: []string{"./bin/etcdctl", "watch", "-i"},
- commandArgs: []string{"watch", "foo", "bar", "--rev", "7", "--prefix", "--prev-kv", "--", "echo", "Hello", "World"},
- interactive: true,
- interactiveWatchPrefix: true,
- interactiveWatchRev: 7,
- interactiveWatchPrevKey: true,
- watchArgs: []string{"foo", "bar"},
- execArgs: []string{"echo", "Hello", "World"},
- err: nil,
- },
- }
- for i, ts := range tt {
- watchArgs, execArgs, err := parseWatchArgs(ts.osArgs, ts.commandArgs, ts.envKey, ts.envRange, ts.interactive)
- if err != ts.err {
- t.Fatalf("#%d: error expected %v, got %v", i, ts.err, err)
- }
- if !reflect.DeepEqual(watchArgs, ts.watchArgs) {
- t.Fatalf("#%d: watchArgs expected %q, got %v", i, ts.watchArgs, watchArgs)
- }
- if !reflect.DeepEqual(execArgs, ts.execArgs) {
- t.Fatalf("#%d: execArgs expected %q, got %v", i, ts.execArgs, execArgs)
- }
- if ts.interactive {
- if ts.interactiveWatchPrefix != watchPrefix {
- t.Fatalf("#%d: interactive watchPrefix expected %v, got %v", i, ts.interactiveWatchPrefix, watchPrefix)
- }
- if ts.interactiveWatchRev != watchRev {
- t.Fatalf("#%d: interactive watchRev expected %d, got %d", i, ts.interactiveWatchRev, watchRev)
- }
- if ts.interactiveWatchPrevKey != watchPrevKey {
- t.Fatalf("#%d: interactive watchPrevKey expected %v, got %v", i, ts.interactiveWatchPrevKey, watchPrevKey)
- }
- }
- }
-}
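The deleted Test_parseWatchArgs table largely exercises how explicit key arguments interact with the ETCDCTL_WATCH_KEY / ETCDCTL_WATCH_RANGE_END environment pair. A reduced sketch of those rules; exec-command splitting and interactive mode are omitted:

```go
package main

import (
	"errors"
	"fmt"
)

// resolveWatchKeys reduces parseWatchArgs to the key-resolution rules the test
// table covers: env vars and explicit arguments are mutually exclusive, and a
// range end without a key is rejected.
func resolveWatchKeys(args []string, envKey, envRange string) ([]string, error) {
	if envKey == "" && envRange != "" {
		return nil, errors.New("ETCDCTL_WATCH_RANGE_END set without ETCDCTL_WATCH_KEY")
	}
	if envKey != "" {
		if len(args) > 0 {
			return nil, errors.New("conflicting arguments and environment key")
		}
		if envRange != "" {
			return []string{envKey, envRange}, nil
		}
		return []string{envKey}, nil
	}
	return args, nil
}

func main() {
	keys, err := resolveWatchKeys(nil, "foo", "bar")
	fmt.Println(keys, err) // [foo bar] <nil>
}
```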
diff --git a/etcdctl/ctlv3/ctl.go b/etcdctl/ctlv3/ctl.go
index 6d3abfb3d94..a4761798d93 100644
--- a/etcdctl/ctlv3/ctl.go
+++ b/etcdctl/ctlv3/ctl.go
@@ -12,23 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package ctlv3 contains the main entry point for the etcdctl for v3 API.
+// Package ctlv3 contains the main entry point for etcdctl with the v3 API.
package ctlv3
import (
- "os"
"time"
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/etcdctl/v3/ctlv3/command"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "github.com/ls-2018/etcd_cn/etcdctl/ctlv3/command"
+ "github.com/ls-2018/etcd_cn/offical/api/v3/version"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
)
const (
cliName = "etcdctl"
- cliDescription = "A simple command line client for etcd3."
+ cliDescription = "etcd3的一个简单的命令行客户机."
defaultDialTimeout = 2 * time.Second
defaultCommandTimeOut = 5 * time.Second
@@ -36,48 +35,41 @@ const (
defaultKeepAliveTimeOut = 6 * time.Second
)
-var (
- globalFlags = command.GlobalFlags{}
-)
+var globalFlags = command.GlobalFlags{}
-var (
- rootCmd = &cobra.Command{
- Use: cliName,
- Short: cliDescription,
- SuggestFor: []string{"etcdctl"},
- }
-)
+var rootCmd = &cobra.Command{
+ Use: cliName,
+ Short: cliDescription,
+ SuggestFor: []string{"etcdctl"},
+}
func init() {
- rootCmd.PersistentFlags().StringSliceVar(&globalFlags.Endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC endpoints")
- rootCmd.PersistentFlags().BoolVar(&globalFlags.Debug, "debug", false, "enable client-side debug logging")
+ rootCmd.PersistentFlags().StringSliceVar(&globalFlags.Endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC端点")
+ rootCmd.PersistentFlags().BoolVar(&globalFlags.Debug, "debug", false, "启用客户端调试日志记录")
- rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)")
- rootCmd.PersistentFlags().BoolVar(&globalFlags.IsHex, "hex", false, "print byte strings as hex encoded strings")
- rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
- return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault
- })
+ rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, "write-out", "w", "simple", "设置输出格式 (fields, json, protobuf, simple, table)")
+ rootCmd.PersistentFlags().BoolVar(&globalFlags.IsHex, "hex", false, "以十六进制编码的字符串输出字节串")
- rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections")
- rootCmd.PersistentFlags().DurationVar(&globalFlags.CommandTimeOut, "command-timeout", defaultCommandTimeOut, "timeout for short running command (excluding dial timeout)")
- rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTime, "keepalive-time", defaultKeepAliveTime, "keepalive time for client connections")
- rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTimeout, "keepalive-timeout", defaultKeepAliveTimeOut, "keepalive timeout for client connections")
+ rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "拨号客户端连接超时")
+ rootCmd.PersistentFlags().DurationVar(&globalFlags.CommandTimeOut, "command-timeout", defaultCommandTimeOut, "运行命令的超时(不包括拨号超时).")
+ rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTime, "keepalive-time", defaultKeepAliveTime, "客户端连接的存活时间")
+ rootCmd.PersistentFlags().DurationVar(&globalFlags.KeepAliveTimeout, "keepalive-timeout", defaultKeepAliveTimeOut, "客户端连接的Keepalive超时")
// TODO: secure by default when etcd enables secure gRPC by default.
- rootCmd.PersistentFlags().BoolVar(&globalFlags.Insecure, "insecure-transport", true, "disable transport security for client connections")
- rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureDiscovery, "insecure-discovery", true, "accept insecure SRV records describing cluster endpoints")
- rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureSkipVerify, "insecure-skip-tls-verify", false, "skip server certificate verification (CAUTION: this option should be enabled only for testing purposes)")
- rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.CertFile, "cert", "", "identify secure client using this TLS certificate file")
- rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.KeyFile, "key", "", "identify secure client using this TLS key file")
- rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.TrustedCAFile, "cacert", "", "verify certificates of TLS-enabled secure servers using this CA bundle")
- rootCmd.PersistentFlags().StringVar(&globalFlags.User, "user", "", "username[:password] for authentication (prompt if password is not supplied)")
- rootCmd.PersistentFlags().StringVar(&globalFlags.Password, "password", "", "password for authentication (if this option is used, --user option shouldn't include password)")
- rootCmd.PersistentFlags().StringVarP(&globalFlags.TLS.ServerName, "discovery-srv", "d", "", "domain name to query for SRV records describing cluster endpoints")
- rootCmd.PersistentFlags().StringVarP(&globalFlags.DNSClusterServiceName, "discovery-srv-name", "", "", "service name to query when using DNS discovery")
+ rootCmd.PersistentFlags().BoolVar(&globalFlags.Insecure, "insecure-transport", true, "为客户端连接禁用传输安全性")
+ rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureDiscovery, "insecure-discovery", true, "接受描述集群端点的不安全的SRV记录")
+ rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureSkipVerify, "insecure-skip-tls-verify", false, "跳过 etcd 证书验证 (注意:该选项仅用于测试目的.)")
+ rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.CertFile, "cert", "", "识别使用该TLS证书文件的安全客户端")
+ rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.KeyFile, "key", "", "识别使用该TLS密钥文件的安全客户端")
+ rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.TrustedCAFile, "cacert", "", "使用此CA包验证启用tls的安全服务器的证书")
+ rootCmd.PersistentFlags().StringVar(&globalFlags.User, "user", "", "username[:password] (如果没有提供密码,则提示)")
+ rootCmd.PersistentFlags().StringVar(&globalFlags.Password, "password", "", "身份验证的密码(如果使用了这个选项,——user选项不应该包含密码)")
+ rootCmd.PersistentFlags().StringVarP(&globalFlags.TLS.ServerName, "discovery-srv", "d", "", "查询描述集群端点的SRV记录的域名")
+ rootCmd.PersistentFlags().StringVarP(&globalFlags.DNSClusterServiceName, "discovery-srv-name", "", "", "使用DNS发现时需要查询的服务名称")
rootCmd.AddCommand(
command.NewGetCommand(),
- command.NewPutCommand(),
+ command.NewPutCommand(), // ✅
command.NewDelCommand(),
command.NewTxnCommand(),
command.NewCompactionCommand(),
@@ -97,8 +89,6 @@ func init() {
command.NewUserCommand(),
command.NewRoleCommand(),
command.NewCheckCommand(),
- command.NewCompletionCommand(),
- command.NewDowngradeCommand(),
)
}
@@ -115,11 +105,7 @@ func Start() error {
func MustStart() {
if err := Start(); err != nil {
- if rootCmd.SilenceErrors {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- } else {
- os.Exit(cobrautl.ExitError)
- }
+ cobrautl.ExitWithError(cobrautl.ExitError, err)
}
}
diff --git a/etcdctl/doc/mirror_maker.md b/etcdctl/doc/mirror_maker.md
deleted file mode 100644
index 3da15dca88c..00000000000
--- a/etcdctl/doc/mirror_maker.md
+++ /dev/null
@@ -1,29 +0,0 @@
-## Mirror Maker
-
-Mirror maker mirrors a prefix in the key-value space of an etcd cluster into another prefix in another cluster. Mirroring is designed for copying configuration to various clusters distributed around the world. Mirroring usually has very low latency once it completes synchronizing with the initial state. Mirror maker utilizes the etcd watcher facility to immediately inform the mirror of any key modifications. Based on our experiments, the network latency between the mirror maker and the two clusters accounts for most of the latency. If the network is healthy, copying configuration held in etcd to the mirror should take under one second even for a world-wide deployment.
-
-If the mirror maker fails to connect to one of the clusters, the mirroring will pause. Mirroring can be resumed automatically once connectivity is reestablished.
-
-The mirroring mechanism is unidirectional. Changing the value on the mirrored cluster won't reflect the value back to the origin cluster. The mirror maker only mirrors key-value pairs; metadata, such as version number or modification revision, is discarded. However, mirror maker still attempts to preserve update ordering during normal operation, but there is no ordering guarantee during initial sync nor during failure recovery following network interruption. As a rule of thumb, the ordering of the updates on the mirror should not be considered reliable.
-
-```
-+-------------+
-| |
-| source | +-----------+
-| cluster +----> | mirror |
-| | | maker |
-+-------------+ +---+-------+
- |
- v
- +-------------+
- | |
- | mirror |
- | cluster |
- | |
- +-------------+
-
-```
-
-Mirror-maker is a built-in feature of [etcdctl][etcdctl].
-
-[etcdctl]: ../README.md
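For context, the mirror-maker doc removed above boils down to a simple pattern: an initial range read over the source prefix, then a watch starting just after the snapshot revision that replays every later put and delete onto the destination cluster. A minimal, hypothetical sketch of that idea against the upstream `go.etcd.io/etcd/client/v3` package (not this fork's import paths, and far simpler and less robust than the real `etcdctl make-mirror` command) could look like this:

```go
// Hypothetical sketch of watch-based prefix mirroring; endpoints and prefix
// are illustrative only.
package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func mirror(ctx context.Context, src, dst *clientv3.Client, prefix string) error {
	// Initial sync: copy the current state of the prefix to the destination.
	resp, err := src.Get(ctx, prefix, clientv3.WithPrefix())
	if err != nil {
		return err
	}
	for _, kv := range resp.Kvs {
		if _, err := dst.Put(ctx, string(kv.Key), string(kv.Value)); err != nil {
			return err
		}
	}

	// Continuous sync: replay every change made after the snapshot revision.
	wch := src.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(resp.Header.Revision+1))
	for wresp := range wch {
		if werr := wresp.Err(); werr != nil {
			return werr
		}
		for _, ev := range wresp.Events {
			var err error
			switch ev.Type {
			case clientv3.EventTypePut:
				_, err = dst.Put(ctx, string(ev.Kv.Key), string(ev.Kv.Value))
			case clientv3.EventTypeDelete:
				_, err = dst.Delete(ctx, string(ev.Kv.Key))
			}
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	src, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	dst, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:22379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()
	if err := mirror(context.Background(), src, dst, "/config/"); err != nil {
		log.Fatal(err)
	}
}
```

As the removed doc notes, only keys and values are copied; revisions and other metadata on the destination cluster will differ, and update ordering is best-effort only.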
diff --git a/etcdctl/go.mod b/etcdctl/go.mod
deleted file mode 100644
index 8c4ab38a3dc..00000000000
--- a/etcdctl/go.mod
+++ /dev/null
@@ -1,56 +0,0 @@
-module go.etcd.io/etcd/etcdctl/v3
-
-go 1.19
-
-require (
- github.com/bgentry/speakeasy v0.1.0
- github.com/cheggaaa/pb/v3 v3.1.0
- github.com/dustin/go-humanize v1.0.1
- github.com/olekukonko/tablewriter v0.0.5
- github.com/spf13/cobra v1.6.1
- github.com/spf13/pflag v1.0.5
- go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
- go.uber.org/zap v1.24.0
- golang.org/x/time v0.0.0-20220609170525-579cf78fd858
- google.golang.org/grpc v1.51.0
-)
-
-require (
- github.com/VividCortex/ewma v1.1.1 // indirect
- github.com/coreos/go-semver v0.3.1 // indirect
- github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/fatih/color v1.13.0 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/inconshreveable/mousetrap v1.0.1 // indirect
- github.com/mattn/go-colorable v0.1.12 // indirect
- github.com/mattn/go-isatty v0.0.14 // indirect
- github.com/mattn/go-runewidth v0.0.12 // indirect
- github.com/pkg/errors v0.9.1 // indirect
- github.com/rivo/uniseg v0.2.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/net v0.4.0 // indirect
- golang.org/x/sys v0.3.0 // indirect
- golang.org/x/text v0.5.0 // indirect
- google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
-)
-
-replace (
- go.etcd.io/etcd/api/v3 => ../api
- go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
- go.etcd.io/etcd/client/v3 => ../client/v3
- go.etcd.io/etcd/pkg/v3 => ../pkg
-)
-
-// Bad imports are sometimes causing attempts to pull that code.
-// This makes the error more explicit.
-replace (
- go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY
-)
diff --git a/etcdctl/go.sum b/etcdctl/go.sum
deleted file mode 100644
index 070aa8aea05..00000000000
--- a/etcdctl/go.sum
+++ /dev/null
@@ -1,224 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
-github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04=
-github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
-github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
-github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow=
-github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
-github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
-github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
-golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/etcdctl/main.go b/etcdctl/main.go
index 95b3416dea5..b04acef4fe8 100644
--- a/etcdctl/main.go
+++ b/etcdctl/main.go
@@ -12,24 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// etcdctl is a command line application that controls etcd.
+// etcdctl is a command-line application that controls etcd.
package main
import (
- "go.etcd.io/etcd/etcdctl/v3/ctlv3"
+ "github.com/ls-2018/etcd_cn/etcdctl/ctlv3"
)
-/*
-*
-mainWithError is fully analogous to main, but instead of signaling errors
-by os.Exit, it exposes the error explicitly, such that test-logic can intercept
-control to e.g. dump coverage data (even for test-for-failure scenarios).
-*/
-func mainWithError() error {
- return ctlv3.Start()
-}
-
func main() {
ctlv3.MustStart()
- return
}
diff --git a/etcdctl/main_test.go b/etcdctl/main_test.go
deleted file mode 100644
index 8edbb0ae9b8..00000000000
--- a/etcdctl/main_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "log"
- "os"
- "strings"
- "testing"
-)
-
-func SplitTestArgs(args []string) (testArgs, appArgs []string) {
- for i, arg := range args {
- switch {
- case strings.HasPrefix(arg, "-test."):
- testArgs = append(testArgs, arg)
- case i == 0:
- appArgs = append(appArgs, arg)
- testArgs = append(testArgs, arg)
- default:
- appArgs = append(appArgs, arg)
- }
- }
- return
-}
-
-// TestEmpty is an empty test to avoid no-tests warning.
-func TestEmpty(t *testing.T) {}
-
-/**
- * The purpose of this "test" is to run etcdctl with code-coverage
- * collection turned on. The technique is documented here:
- *
- * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage
- */
-func TestMain(m *testing.M) {
- // don't launch etcdctl when invoked via go test
- if strings.HasSuffix(os.Args[0], "etcdctl.test") {
- return
- }
-
- testArgs, appArgs := SplitTestArgs(os.Args)
-
- os.Args = appArgs
-
- err := mainWithError()
- if err != nil {
- log.Fatalf("etcdctl failed with: %v", err)
- }
-
- // This will generate coverage files:
- os.Args = testArgs
- m.Run()
-}
diff --git a/etcdutl/LICENSE b/etcdutl/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/etcdutl/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/etcdutl/README.md b/etcdutl/README.md
index 6943a047858..a5dac609fdf 100644
--- a/etcdutl/README.md
+++ b/etcdutl/README.md
@@ -1,29 +1,10 @@
etcdutl
========
-`etcdutl` is a command line administration utility for [etcd][etcd].
-
-It's designed to operate directly on etcd data files.
-For operations over a network, please use `etcdctl`.
-
+It is designed to operate directly on etcd data files. For operations over the network, please use `etcdctl`.
### DEFRAG [options]
-
-DEFRAG directly defragments an etcd data directory while etcd is not running.
-When an etcd member reclaims storage space from deleted and compacted keys, the space is kept in a free list and the database file remains the same size. By defragmenting the database, the etcd member releases this free space back to the file system.
-
-In order to defrag a live etcd instances over the network, please use `etcdctl defrag` instead.
-
-#### Options
-
-- data-dir -- Optional. If present, defragments a data directory not in use by etcd.
-
-#### Output
-
-Exit status '0' when the process was successful.
-
-#### Example
-
-To defragment a data directory directly, use the `--data-dir` flag:
+DEFRAG directly defragments an etcd data directory while etcd is not running. When an etcd member reclaims storage space from deleted and compacted keys, the space is kept in a free list and the database file stays the same size. By
+defragmenting the database, the etcd member releases this free space back to the file system.
``` bash
# Defragment while etcd is not running
@@ -32,151 +13,33 @@ To defragment a data directory directly, use the `--data-dir` flag:
# Error: cannot open database at default.etcd/member/snap/db
```
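For reference, the offline defragmentation described above amounts to copying every live bucket and key out of the bolt database file into a fresh file, so the free pages accumulated from deleted and compacted keys are dropped. A minimal, hypothetical sketch of that idea with `go.etcd.io/bbolt` follows (illustrative paths and helper name; the actual `etcdutl defrag` implementation also swaps the compacted file into place under `<data-dir>/member/snap/db` and must never run against a live member):

```go
// Hypothetical sketch: rebuild a bolt db file by copying live data only.
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func compactInto(dstPath, srcPath string) error {
	src, err := bolt.Open(srcPath, 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := bolt.Open(dstPath, 0600, nil)
	if err != nil {
		return err
	}
	defer dst.Close()

	return src.View(func(stx *bolt.Tx) error {
		// Walk every top-level bucket in the source file; etcd's backend only
		// uses top-level buckets, so nested buckets are not handled here.
		return stx.ForEach(func(name []byte, sb *bolt.Bucket) error {
			return dst.Update(func(dtx *bolt.Tx) error {
				dstBkt, err := dtx.CreateBucketIfNotExists(name)
				if err != nil {
					return err
				}
				// Copy each key/value pair; only live data lands in the new file.
				return sb.ForEach(func(k, v []byte) error {
					return dstBkt.Put(k, v)
				})
			})
		})
	})
}

func main() {
	if err := compactInto("db.compacted", "default.etcd/member/snap/db"); err != nil {
		log.Fatal(err)
	}
}
```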
-#### Remarks
-
-DEFRAG returns a zero exit code only if it succeeded in defragmenting all given endpoints.
-
-
-### SNAPSHOT RESTORE [options] \
-
-SNAPSHOT RESTORE creates an etcd data directory for an etcd cluster member from a backend database snapshot and a new cluster configuration. Restoring the snapshot into each member for a new cluster configuration will initialize a new etcd cluster preloaded by the snapshot data.
-
-#### Options
-
-The snapshot restore options closely resemble to those used in the `etcd` command for defining a cluster.
-
-- data-dir -- Path to the data directory. Uses \.etcd if none given.
-
-- wal-dir -- Path to the WAL directory. Uses data directory if none given.
-
-- initial-cluster -- The initial cluster configuration for the restored etcd cluster.
-
-- initial-cluster-token -- Initial cluster token for the restored etcd cluster.
-
-- initial-advertise-peer-urls -- List of peer URLs for the member being restored.
-
-- name -- Human-readable name for the etcd cluster member being restored.
-
-- skip-hash-check -- Ignore snapshot integrity hash value (required if copied from data directory)
-
-#### Output
-
-A new etcd data directory initialized with the snapshot.
-
#### Example
Save a snapshot, restore into a new 3 node cluster, and start the cluster:
+
```
-# save snapshot
-./etcdctl snapshot save snapshot.db
+./etcdutl snapshot save snapshot.db
# restore members
-./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
-./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
-./etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:22380 --name sshot2 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
+bin/etcdutl snapshot restore snapshot.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:32380 --name sshot3 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380'
# launch members
-./etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 &
-./etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 &
-./etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 &
+bin/etcd --name sshot1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 &
+bin/etcd --name sshot2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 &
+bin/etcd --name sshot3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 &
```
-### SNAPSHOT STATUS \
-
-SNAPSHOT STATUS lists information about a given backend database snapshot file.
-
-#### Output
-
-##### Simple format
-Prints a humanized table of the database hash, revision, total keys, and size.
+snapshot restore ../default.etcd/member/snap/bolt.db --initial-cluster-token etcd-cluster-1 --initial-advertise-peer-urls http://127.0.0.1:12380 --name sshot1 --initial-cluster 'sshot1=http://127.0.0.1:12380,sshot2=http://127.0.0.1:22380,sshot3=http://127.0.0.1:32380' --data-dir=123
-##### JSON format
-
-Prints a line of JSON encoding the database hash, revision, total keys, and size.
-
-#### Examples
-```bash
-./etcdutl snapshot status file.db
-# cf1550fb, 3, 3, 25 kB
-```
```bash
-./etcdutl --write-out=json snapshot status file.db
-# {"hash":3474280699,"revision":3,"totalKey":3,"totalSize":24576}
-```
-
-```bash
-./etcdutl --write-out=table snapshot status file.db
+myetcdctl snapshot status ../default.etcd/member/snap/bolt.db --write-out=table
+----------+----------+------------+------------+
| HASH | REVISION | TOTAL KEYS | TOTAL SIZE |
+----------+----------+------------+------------+
-| cf1550fb | 3 | 3 | 25 kB |
+| d1ed6c2f | 0 | 6 | 25 kB |
+----------+----------+------------+------------+
```
-
-### VERSION
-
-Prints the version of etcdutl.
-
-#### Output
-
-Prints etcd version and API version.
-
-#### Examples
-
-
-```bash
-./etcdutl version
-# etcdutl version: 3.5.0
-# API version: 3.1
-```
-
-
-## Exit codes
-
-For all commands, a successful execution returns a zero exit code. All failures will return non-zero exit codes.
-
-## Output formats
-
-All commands accept an output format by setting `-w` or `--write-out`. All commands default to the "simple" output format, which is meant to be human-readable. The simple format is listed in each command's `Output` description since it is customized for each command. If a command has a corresponding RPC, it will respect all output formats.
-
-If a command fails, returning a non-zero exit code, an error string will be written to standard error regardless of output format.
-
-### Simple
-
-A format meant to be easy to parse and human-readable. Specific to each command.
-
-### JSON
-
-The JSON encoding of the command's [RPC response][etcdrpc]. Since etcd's RPCs use byte strings, the JSON output will encode keys and values in base64.
-
-Some commands without an RPC also support JSON; see the command's `Output` description.
-
-### Protobuf
-
-The protobuf encoding of the command's [RPC response][etcdrpc]. If an RPC is streaming, the stream messages will be concatenated. If an RPC is not given for a command, the protobuf output is not defined.
-
-### Fields
-
-An output format similar to JSON but meant to parse with coreutils. For an integer field named `Field`, it writes a line in the format `"Field" : %d` where `%d` is go's integer formatting. For byte array fields, it writes `"Field" : %q` where `%q` is go's quoted string formatting (e.g., `[]byte{'a', '\n'}` is written as `"a\n"`).
-
-## Compatibility Support
-
-etcdutl is still in its early stage. We try out best to ensure fully compatible releases, however we might break compatibility to fix bugs or improve commands. If we intend to release a version of etcdutl with backward incompatibilities, we will provide notice prior to release and have instructions on how to upgrade.
-
-### Input Compatibility
-
-Input includes the command name, its flags, and its arguments. We ensure backward compatibility of the input of normal commands in non-interactive mode.
-
-### Output Compatibility
-Currently, we do not ensure backward compatibility of utility commands.
-
-### TODO: compatibility with etcd server
-
-[etcd]: https://github.com/coreos/etcd
-[READMEv2]: READMEv2.md
-[v2key]: ../store/node_extern.go#L28-L37
-[v3key]: ../api/mvccpb/kv.proto#L12-L29
-[etcdrpc]: ../api/etcdserverpb/rpc.proto
-[storagerpc]: ../api/mvccpb/kv.proto
diff --git a/etcdutl/ctl.go b/etcdutl/ctl.go
deleted file mode 100644
index 5bafe3399e3..00000000000
--- a/etcdutl/ctl.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package etcdutl contains the main entry point for the etcdutl.
-package main
-
-import (
- "github.com/spf13/cobra"
-
- "go.etcd.io/etcd/etcdutl/v3/etcdutl"
-)
-
-const (
- cliName = "etcdutl"
- cliDescription = "An administrative command line tool for etcd3."
-)
-
-var (
- rootCmd = &cobra.Command{
- Use: cliName,
- Short: cliDescription,
- SuggestFor: []string{"etcdutl"},
- }
-)
-
-func init() {
- rootCmd.PersistentFlags().StringVarP(&etcdutl.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)")
- rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
- return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault
- })
-
- rootCmd.AddCommand(
- etcdutl.NewBackupCommand(),
- etcdutl.NewDefragCommand(),
- etcdutl.NewSnapshotCommand(),
- etcdutl.NewVersionCommand(),
- etcdutl.NewCompletionCommand(),
- etcdutl.NewMigrateCommand(),
- )
-}
-
-func Start() error {
- // Make help just show the usage
- rootCmd.SetHelpTemplate(`{{.UsageString}}`)
- return rootCmd.Execute()
-}
-
-func init() {
- cobra.EnablePrefixMatching = true
-}
diff --git a/etcdutl/etcdutl/backup_command.go b/etcdutl/etcdutl/backup_command.go
index 89121a37e95..d1cb0823867 100644
--- a/etcdutl/etcdutl/backup_command.go
+++ b/etcdutl/etcdutl/backup_command.go
@@ -20,26 +20,25 @@ import (
"regexp"
"time"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/datadir"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/verify"
+ "github.com/ls-2018/etcd_cn/etcd/wal"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/pkg/idutil"
+ "github.com/ls-2018/etcd_cn/pkg/pbutil"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
"github.com/spf13/cobra"
- "go.uber.org/zap"
-
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/client/pkg/v3/types"
- "go.etcd.io/etcd/pkg/v3/idutil"
- "go.etcd.io/etcd/pkg/v3/pbutil"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/storage/backend"
- "go.etcd.io/etcd/server/v3/storage/datadir"
- "go.etcd.io/etcd/server/v3/storage/schema"
- "go.etcd.io/etcd/server/v3/storage/wal"
- "go.etcd.io/etcd/server/v3/storage/wal/walpb"
- "go.etcd.io/etcd/server/v3/verify"
- "go.etcd.io/raft/v3/raftpb"
bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
)
var (
@@ -62,13 +61,9 @@ func NewBackupCommand() *cobra.Command {
cmd.Flags().StringVar(&walDir, "wal-dir", "", "Path to the etcd wal dir")
cmd.Flags().StringVar(&backupDir, "backup-dir", "", "Path to the backup dir")
cmd.Flags().StringVar(&backupWalDir, "backup-wal-dir", "", "Path to the backup wal dir")
- cmd.Flags().BoolVar(&withV3, "with-v3", true, "Backup v3 backend data. Note -with-v3=false is not supported since etcd v3.6. Please use v3.5.x client as the last supporting this deprecated functionality.")
+ cmd.Flags().BoolVar(&withV3, "with-v3", true, "Backup v3 backend data")
cmd.MarkFlagRequired("data-dir")
cmd.MarkFlagRequired("backup-dir")
- cmd.MarkFlagDirname("data-dir")
- cmd.MarkFlagDirname("wal-dir")
- cmd.MarkFlagDirname("backup-dir")
- cmd.MarkFlagDirname("backup-wal-dir")
return cmd
}
@@ -99,7 +94,9 @@ func newDesiredCluster() desiredCluster {
},
RaftAttributes: membership.RaftAttributes{
PeerURLs: []string{"http://use-flag--force-new-cluster:2080"},
- }}},
+ },
+ },
+ },
confState: raftpb.ConfState{Voters: []uint64{nodeID}},
}
}
@@ -108,11 +105,6 @@ func newDesiredCluster() desiredCluster {
func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, destWAL string) error {
lg := GetLogger()
- if !withV3 {
- lg.Warn("-with-v3=false is not supported since etcd v3.6. Please use v3.5.x client as the last supporting this deprecated functionality.")
- return nil
- }
-
srcSnap := datadir.ToSnapDir(srcDir)
destSnap := datadir.ToSnapDir(destDir)
@@ -124,7 +116,7 @@ func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, des
destWAL = datadir.ToWalDir(destDir)
}
- if err := fileutil.CreateDirAll(lg, destSnap); err != nil {
+ if err := fileutil.CreateDirAll(destSnap); err != nil {
lg.Fatal("failed creating backup snapshot dir", zap.String("dest-snap", destSnap), zap.Error(err))
}
@@ -133,8 +125,8 @@ func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, des
desired := newDesiredCluster()
walsnap := saveSnap(lg, destSnap, srcSnap, &desired)
- metadata, state, ents := translateWAL(lg, srcWAL, walsnap)
- saveDB(lg, destDbPath, srcDbPath, state.Commit, state.Term, &desired)
+ metadata, state, ents := translateWAL(lg, srcWAL, walsnap, withV3)
+ saveDB(lg, destDbPath, srcDbPath, state.Commit, state.Term, &desired, withV3)
neww, err := wal.Create(lg, destWAL, pbutil.MustMarshal(&metadata))
if err != nil {
@@ -195,7 +187,7 @@ func mustTranslateV2store(lg *zap.Logger, storeData []byte, desired *desiredClus
return outputData
}
-func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot) (etcdserverpb.Metadata, raftpb.HardState, []raftpb.Entry) {
+func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot, v3 bool) (etcdserverpb.Metadata, raftpb.HardState, []raftpb.Entry) {
w, err := wal.OpenForRead(lg, srcWAL, walsnap)
if err != nil {
lg.Fatal("wal.OpenForRead failed", zap.Error(err))
@@ -223,7 +215,7 @@ func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot) (etcdse
 		// TERM changes (so there are superfluous entries from previous term).
if ents[i].Type == raftpb.EntryConfChange {
- lg.Info("ignoring EntryConfChange raft entry")
+			lg.Info("ignoring EntryConfChange log entry")
raftEntryToNoOp(&ents[i])
continue
}
@@ -238,20 +230,27 @@ func translateWAL(lg *zap.Logger, srcWAL string, walsnap walpb.Snapshot) (etcdse
}
if v2Req != nil && v2Req.Method == "PUT" && memberAttrRE.MatchString(v2Req.Path) {
- lg.Info("ignoring member attribute update on",
- zap.Stringer("entry", &ents[i]),
- zap.String("v2Req.Path", v2Req.Path))
+			lg.Info("ignoring member update", zap.Stringer("entry", &ents[i]), zap.String("v2Req.Path", v2Req.Path))
raftEntryToNoOp(&ents[i])
continue
}
+ if v2Req != nil {
+ lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i]))
+ }
+
if raftReq.ClusterMemberAttrSet != nil {
lg.Info("ignoring cluster_member_attr_set")
raftEntryToNoOp(&ents[i])
continue
}
- lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i]))
+ if v3 || raftReq.Header == nil {
+ lg.Debug("preserving log entry", zap.Stringer("entry", &ents[i]))
+ continue
+ }
+ lg.Info("ignoring v3 raft entry")
+ raftEntryToNoOp(&ents[i])
}
var metadata etcdserverpb.Metadata
pbutil.MustUnmarshal(&metadata, wmetadata)
@@ -266,52 +265,65 @@ func raftEntryToNoOp(entry *raftpb.Entry) {
}
// saveDB copies the v3 backend and strips cluster information.
-func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desired *desiredCluster) {
+func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desired *desiredCluster, v3 bool) {
// open src db to safely copy db state
- var src *bolt.DB
- ch := make(chan *bolt.DB, 1)
- go func() {
- db, err := bolt.Open(srcDB, 0444, &bolt.Options{ReadOnly: true})
- if err != nil {
- lg.Fatal("bolt.Open FAILED", zap.Error(err))
+ if v3 {
+ var src *bolt.DB
+ ch := make(chan *bolt.DB, 1)
+ go func() {
+ db, err := bolt.Open(srcDB, 0o444, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ lg.Fatal("bolt.Open FAILED", zap.Error(err))
+ }
+ ch <- db
+ }()
+ select {
+ case src = <-ch:
+ case <-time.After(time.Second):
+ lg.Fatal("timed out waiting to acquire lock on", zap.String("srcDB", srcDB))
+ src = <-ch
}
- ch <- db
- }()
- select {
- case src = <-ch:
- case <-time.After(time.Second):
- lg.Fatal("timed out waiting to acquire lock on", zap.String("srcDB", srcDB))
- }
- defer src.Close()
+ defer src.Close()
- tx, err := src.Begin(false)
- if err != nil {
- lg.Fatal("bbolt.BeginTx failed", zap.Error(err))
- }
+ tx, err := src.Begin(false)
+ if err != nil {
+ lg.Fatal("bbolt.BeginTx failed", zap.Error(err))
+ }
- // copy srcDB to destDB
- dest, err := os.Create(destDB)
- if err != nil {
- lg.Fatal("creation of destination file failed", zap.String("dest", destDB), zap.Error(err))
- }
- if _, err := tx.WriteTo(dest); err != nil {
- lg.Fatal("bbolt write to destination file failed", zap.String("dest", destDB), zap.Error(err))
- }
- dest.Close()
- if err := tx.Rollback(); err != nil {
- lg.Fatal("bbolt tx.Rollback failed", zap.String("dest", destDB), zap.Error(err))
+ // copy srcDB to destDB
+ dest, err := os.Create(destDB)
+ if err != nil {
+ lg.Fatal("creation of destination file failed", zap.String("dest", destDB), zap.Error(err))
+ }
+ if _, err := tx.WriteTo(dest); err != nil {
+ lg.Fatal("bbolt write to destination file failed", zap.String("dest", destDB), zap.Error(err))
+ }
+ dest.Close()
+ if err := tx.Rollback(); err != nil {
+ lg.Fatal("bbolt tx.Rollback failed", zap.String("dest", destDB), zap.Error(err))
+ }
}
- // trim membership info
- be := backend.NewDefaultBackend(lg, destDB)
+ be := backend.NewDefaultBackend(destDB)
defer be.Close()
- ms := schema.NewMembershipBackend(lg, be)
- if err := ms.TrimClusterFromBackend(); err != nil {
+
+ if err := membership.TrimClusterFromBackend(be); err != nil {
lg.Fatal("bbolt tx.Membership failed", zap.Error(err))
}
raftCluster := membership.NewClusterFromMembers(lg, desired.clusterId, desired.members)
raftCluster.SetID(desired.nodeId, desired.clusterId)
- raftCluster.SetBackend(ms)
+ raftCluster.SetBackend(be)
raftCluster.PushMembershipToStorage()
+
+ if !v3 {
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ cindex.UnsafeCreateMetaBucket(tx)
+ cindex.UnsafeUpdateConsistentIndex(tx, idx, term, false)
+ } else {
+ // Thanks to translateWAL not moving entries, but just replacing them with
+ // 'empty', there is no need to update the consistency index.
+ }
}
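
A minimal Go sketch (not part of the patch) of the lock-acquisition pattern saveDB uses above: bolt.Open blocks while a running etcd still holds the file lock, so the open runs in a goroutine and is raced against a timer. The package name, function name, and the return-an-error-on-timeout behaviour are illustrative assumptions, not code from either tree.

// Sketch only: opens srcDB read-only, giving up after d if the file lock
// cannot be acquired (for example because etcd is still running).
package backuputil

import (
	"fmt"
	"time"

	bolt "go.etcd.io/bbolt"
)

func openReadOnlyWithTimeout(srcDB string, d time.Duration) (*bolt.DB, error) {
	ch := make(chan *bolt.DB, 1)
	errc := make(chan error, 1)
	go func() {
		// bolt.Open blocks here until the flock on srcDB is released.
		db, err := bolt.Open(srcDB, 0o444, &bolt.Options{ReadOnly: true})
		if err != nil {
			errc <- err
			return
		}
		ch <- db
	}()
	select {
	case db := <-ch:
		return db, nil
	case err := <-errc:
		return nil, err
	case <-time.After(d):
		// A caller that retries should drain ch later to avoid leaking the handle.
		return nil, fmt.Errorf("timed out waiting to acquire lock on %q", srcDB)
	}
}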
diff --git a/etcdutl/etcdutl/common.go b/etcdutl/etcdutl/common.go
index d54827d0457..305bf20a23c 100644
--- a/etcdutl/etcdutl/common.go
+++ b/etcdutl/etcdutl/common.go
@@ -15,15 +15,13 @@
package etcdutl
import (
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
-
- "go.etcd.io/etcd/client/pkg/v3/logutil"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
)
func GetLogger() *zap.Logger {
- config := logutil.DefaultZapLoggerConfig
+ config := zap.NewProductionConfig()
config.Encoding = "console"
config.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
lg, err := config.Build()
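
A short sketch (not part of the patch) of the logger the rewritten GetLogger builds: zap's production config with console encoding and RFC3339 timestamps, plus a hypothetical call site. The package name, function names, and logged fields are assumptions for illustration.

package logsketch

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// newConsoleLogger mirrors the configuration used above: production defaults,
// human-readable console output, RFC3339 timestamps.
func newConsoleLogger() (*zap.Logger, error) {
	cfg := zap.NewProductionConfig()
	cfg.Encoding = "console"
	cfg.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
	return cfg.Build()
}

func example() {
	lg, err := newConsoleLogger()
	if err != nil {
		panic(err)
	}
	defer lg.Sync()
	// Hypothetical field values, shown only to illustrate structured output.
	lg.Info("defragmenting", zap.String("data-dir", "/var/lib/etcd"))
}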
diff --git a/etcdutl/etcdutl/completion_commmand.go b/etcdutl/etcdutl/completion_commmand.go
deleted file mode 100644
index 792799b15b0..00000000000
--- a/etcdutl/etcdutl/completion_commmand.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdutl
-
-import (
- "os"
-
- "github.com/spf13/cobra"
-)
-
-func NewCompletionCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "completion [bash|zsh|fish|powershell]",
- Short: "Generate completion script",
- Long: `To load completions:
-
-Bash:
-
- $ source <(etcdutl completion bash)
-
- # To load completions for each session, execute once:
- # Linux:
- $ etcdutl completion bash > /etc/bash_completion.d/etcdutl
- # macOS:
- $ etcdutl completion bash > /usr/local/etc/bash_completion.d/etcdutl
-
-Zsh:
-
- # If shell completion is not already enabled in your environment,
- # you will need to enable it. You can execute the following once:
-
- $ echo "autoload -U compinit; compinit" >> ~/.zshrc
-
- # To load completions for each session, execute once:
- $ etcdutl completion zsh > "${fpath[1]}/_etcdutl"
-
- # You will need to start a new shell for this setup to take effect.
-
-fish:
-
- $ etcdutl completion fish | source
-
- # To load completions for each session, execute once:
- $ etcdutl completion fish > ~/.config/fish/completions/etcdutl.fish
-
-PowerShell:
-
- PS> etcdutl completion powershell | Out-String | Invoke-Expression
-
- # To load completions for every new session, run:
- PS> etcdutl completion powershell > etcdutl.ps1
- # and source this file from your PowerShell profile.
-`,
- DisableFlagsInUseLine: true,
- ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
- Args: cobra.ExactValidArgs(1),
- Run: func(cmd *cobra.Command, args []string) {
- switch args[0] {
- case "bash":
- cmd.Root().GenBashCompletion(os.Stdout)
- case "zsh":
- cmd.Root().GenZshCompletion(os.Stdout)
- case "fish":
- cmd.Root().GenFishCompletion(os.Stdout, true)
- case "powershell":
- cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
- }
- },
- }
-
- return cmd
-}
diff --git a/etcdutl/etcdutl/defrag_command.go b/etcdutl/etcdutl/defrag_command.go
index d8077ae8bfb..d3387ffe0c5 100644
--- a/etcdutl/etcdutl/defrag_command.go
+++ b/etcdutl/etcdutl/defrag_command.go
@@ -19,35 +19,29 @@ import (
"os"
"time"
+ "github.com/ls-2018/etcd_cn/etcd/datadir"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
-
- "go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/server/v3/storage/backend"
- "go.etcd.io/etcd/server/v3/storage/datadir"
)
-var (
- defragDataDir string
-)
+var defragDataDir string
-// NewDefragCommand returns the cobra command for "Defrag".
func NewDefragCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "defrag",
- Short: "Defragments the storage of the etcd",
+		Short: "Defragments the etcd storage",
Run: defragCommandFunc,
}
- cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Required. Defragments a data directory not in use by etcd.")
+ cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "")
cmd.MarkFlagRequired("data-dir")
- cmd.MarkFlagDirname("data-dir")
return cmd
}
func defragCommandFunc(cmd *cobra.Command, args []string) {
err := DefragData(defragDataDir)
if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError,
- fmt.Errorf("Failed to defragment etcd data[%s] (%v)", defragDataDir, err))
+		cobrautl.ExitWithError(cobrautl.ExitError, fmt.Errorf("failed to defragment etcd data[%s] (%v)", defragDataDir, err))
}
}
@@ -58,7 +52,7 @@ func DefragData(dataDir string) error {
dbDir := datadir.ToBackendFileName(dataDir)
go func() {
defer close(bch)
- cfg := backend.DefaultBackendConfig(lg)
+ cfg := backend.DefaultBackendConfig()
cfg.Logger = lg
cfg.Path = dbDir
be = backend.New(cfg)
@@ -66,8 +60,7 @@ func DefragData(dataDir string) error {
select {
case <-bch:
case <-time.After(time.Second):
- fmt.Fprintf(os.Stderr, "waiting for etcd to close and release its lock on %q. "+
- "To defrag a running etcd instance, use `etcdctl defrag` instead.\n", dbDir)
+		fmt.Fprintf(os.Stderr, "waiting for etcd to close and release its lock on %q. To defragment a running etcd instance, omit -data-dir. \n", dbDir)
<-bch
}
return be.Defrag()
diff --git a/etcdutl/etcdutl/migrate_command.go b/etcdutl/etcdutl/migrate_command.go
deleted file mode 100644
index 521cf8ba80c..00000000000
--- a/etcdutl/etcdutl/migrate_command.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdutl
-
-import (
- "fmt"
- "strings"
-
- "github.com/coreos/go-semver/semver"
- "github.com/spf13/cobra"
- "go.uber.org/zap"
-
- "go.etcd.io/etcd/api/v3/version"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/server/v3/storage/backend"
- "go.etcd.io/etcd/server/v3/storage/datadir"
- "go.etcd.io/etcd/server/v3/storage/schema"
- "go.etcd.io/etcd/server/v3/storage/wal"
- "go.etcd.io/etcd/server/v3/storage/wal/walpb"
-)
-
-// NewMigrateCommand prints out the version of etcd.
-func NewMigrateCommand() *cobra.Command {
- o := newMigrateOptions()
- cmd := &cobra.Command{
- Use: "migrate",
- Short: "Migrates schema of etcd data dir files to make them compatible with different etcd version",
- Run: func(cmd *cobra.Command, args []string) {
- cfg, err := o.Config()
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
- }
- err = migrateCommandFunc(cfg)
- if err != nil {
- cobrautl.ExitWithError(cobrautl.ExitError, err)
- }
- },
- }
- o.AddFlags(cmd)
- return cmd
-}
-
-type migrateOptions struct {
- dataDir string
- targetVersion string
- force bool
-}
-
-func newMigrateOptions() *migrateOptions {
- return &migrateOptions{}
-}
-
-func (o *migrateOptions) AddFlags(cmd *cobra.Command) {
- cmd.Flags().StringVar(&o.dataDir, "data-dir", o.dataDir, "Path to the etcd data dir")
- cmd.MarkFlagRequired("data-dir")
- cmd.MarkFlagDirname("data-dir")
-
- cmd.Flags().StringVar(&o.targetVersion, "target-version", o.targetVersion, `Target etcd version to migrate contents of data dir. Minimal value 3.5. Format "X.Y" for example 3.6.`)
- cmd.MarkFlagRequired("target-version")
-
- cmd.Flags().BoolVar(&o.force, "force", o.force, "Ignore migration failure and forcefully override storage version. Not recommended.")
-}
-
-func (o *migrateOptions) Config() (*migrateConfig, error) {
- c := &migrateConfig{
- force: o.force,
- lg: GetLogger(),
- }
- var err error
- dotCount := strings.Count(o.targetVersion, ".")
- if dotCount != 1 {
- return nil, fmt.Errorf(`wrong target version format, expected "X.Y", got %q`, o.targetVersion)
- }
- c.targetVersion, err = semver.NewVersion(o.targetVersion + ".0")
- if err != nil {
- return nil, fmt.Errorf("failed to parse target version: %v", err)
- }
- if c.targetVersion.LessThan(version.V3_5) {
- return nil, fmt.Errorf(`target version %q not supported. Minimal "3.5"`, storageVersionToString(c.targetVersion))
- }
-
- dbPath := datadir.ToBackendFileName(o.dataDir)
- c.be = backend.NewDefaultBackend(GetLogger(), dbPath)
-
- walPath := datadir.ToWalDir(o.dataDir)
- w, err := wal.OpenForRead(c.lg, walPath, walpb.Snapshot{})
- if err != nil {
- return nil, fmt.Errorf(`failed to open wal: %v`, err)
- }
- defer w.Close()
- c.walVersion, err = wal.ReadWALVersion(w)
- if err != nil {
- return nil, fmt.Errorf(`failed to read wal: %v`, err)
- }
-
- return c, nil
-}
-
-type migrateConfig struct {
- lg *zap.Logger
- be backend.Backend
- targetVersion *semver.Version
- walVersion schema.WALVersion
- force bool
-}
-
-func migrateCommandFunc(c *migrateConfig) error {
- defer c.be.Close()
- tx := c.be.BatchTx()
- current, err := schema.DetectSchemaVersion(c.lg, c.be.ReadTx())
- if err != nil {
- c.lg.Error("failed to detect storage version. Please make sure you are using data dir from etcd v3.5 and older")
- return err
- }
- if current == *c.targetVersion {
- c.lg.Info("storage version up-to-date", zap.String("storage-version", storageVersionToString(¤t)))
- return nil
- }
- err = schema.Migrate(c.lg, tx, c.walVersion, *c.targetVersion)
- if err != nil {
- if !c.force {
- return err
- }
- c.lg.Info("normal migrate failed, trying with force", zap.Error(err))
- migrateForce(c.lg, tx, c.targetVersion)
- }
- c.be.ForceCommit()
- return nil
-}
-
-func migrateForce(lg *zap.Logger, tx backend.BatchTx, target *semver.Version) {
- tx.LockOutsideApply()
- defer tx.Unlock()
- // Storage version is only supported since v3.6
- if target.LessThan(version.V3_6) {
- schema.UnsafeClearStorageVersion(tx)
- lg.Warn("forcefully cleared storage version")
- } else {
- schema.UnsafeSetStorageVersion(tx, target)
- lg.Warn("forcefully set storage version", zap.String("storage-version", storageVersionToString(target)))
- }
-}
-
-func storageVersionToString(ver *semver.Version) string {
- return fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
-}
diff --git a/etcdutl/etcdutl/printer.go b/etcdutl/etcdutl/printer.go
index 7d65366065f..72d1170508f 100644
--- a/etcdutl/etcdutl/printer.go
+++ b/etcdutl/etcdutl/printer.go
@@ -18,17 +18,14 @@ import (
"errors"
"fmt"
+ "github.com/ls-2018/etcd_cn/etcdutl/snapshot"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
-
"github.com/dustin/go-humanize"
)
-var (
- OutputFormat string
-)
+var OutputFormat string
type printer interface {
DBStatus(snapshot.Status)
@@ -67,13 +64,12 @@ func newPrinterUnsupported(n string) printer {
func (p *printerUnsupported) DBStatus(snapshot.Status) { p.p(nil) }
func makeDBStatusTable(ds snapshot.Status) (hdr []string, rows [][]string) {
- hdr = []string{"hash", "revision", "total keys", "total size", "version"}
+ hdr = []string{"hash", "revision", "total keys", "total size"}
rows = append(rows, []string{
fmt.Sprintf("%x", ds.Hash),
fmt.Sprint(ds.Revision),
fmt.Sprint(ds.TotalKey),
humanize.Bytes(uint64(ds.TotalSize)),
- ds.Version,
})
return hdr, rows
}
diff --git a/etcdutl/etcdutl/printer_fields.go b/etcdutl/etcdutl/printer_fields.go
index d534e396ffe..cddbf92869e 100644
--- a/etcdutl/etcdutl/printer_fields.go
+++ b/etcdutl/etcdutl/printer_fields.go
@@ -17,7 +17,7 @@ package etcdutl
import (
"fmt"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
+ "github.com/ls-2018/etcd_cn/etcdutl/snapshot"
)
type fieldsPrinter struct{ printer }
@@ -27,5 +27,4 @@ func (p *fieldsPrinter) DBStatus(r snapshot.Status) {
fmt.Println(`"Revision" :`, r.Revision)
fmt.Println(`"Keys" :`, r.TotalKey)
fmt.Println(`"Size" :`, r.TotalSize)
- fmt.Println(`"Version" :`, r.Version)
}
diff --git a/etcdutl/etcdutl/printer_json.go b/etcdutl/etcdutl/printer_json.go
index 38fe3e4548e..92b106f42d5 100644
--- a/etcdutl/etcdutl/printer_json.go
+++ b/etcdutl/etcdutl/printer_json.go
@@ -19,7 +19,7 @@ import (
"fmt"
"os"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
+ "github.com/ls-2018/etcd_cn/etcdutl/snapshot"
)
type jsonPrinter struct {
diff --git a/etcdutl/etcdutl/printer_protobuf.go b/etcdutl/etcdutl/printer_protobuf.go
index 0a9003b475d..469a01c64ce 100644
--- a/etcdutl/etcdutl/printer_protobuf.go
+++ b/etcdutl/etcdutl/printer_protobuf.go
@@ -18,7 +18,7 @@ import (
"fmt"
"os"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
)
type pbPrinter struct{ printer }
diff --git a/etcdutl/etcdutl/printer_simple.go b/etcdutl/etcdutl/printer_simple.go
index 306ebf0c7f3..a87e8c9a084 100644
--- a/etcdutl/etcdutl/printer_simple.go
+++ b/etcdutl/etcdutl/printer_simple.go
@@ -18,11 +18,10 @@ import (
"fmt"
"strings"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
+ "github.com/ls-2018/etcd_cn/etcdutl/snapshot"
)
-type simplePrinter struct {
-}
+type simplePrinter struct{}
func (s *simplePrinter) DBStatus(ds snapshot.Status) {
_, rows := makeDBStatusTable(ds)
diff --git a/etcdutl/etcdutl/printer_table.go b/etcdutl/etcdutl/printer_table.go
index 2f8f81d4e6a..02dccaac715 100644
--- a/etcdutl/etcdutl/printer_table.go
+++ b/etcdutl/etcdutl/printer_table.go
@@ -17,7 +17,7 @@ package etcdutl
import (
"os"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
+ "github.com/ls-2018/etcd_cn/etcdutl/snapshot"
"github.com/olekukonko/tablewriter"
)
diff --git a/etcdutl/etcdutl/snapshot_command.go b/etcdutl/etcdutl/snapshot_command.go
index 28df31f8dd0..425719e89f9 100644
--- a/etcdutl/etcdutl/snapshot_command.go
+++ b/etcdutl/etcdutl/snapshot_command.go
@@ -18,9 +18,9 @@ import (
"fmt"
"strings"
- "go.etcd.io/etcd/etcdutl/v3/snapshot"
- "go.etcd.io/etcd/pkg/v3/cobrautl"
- "go.etcd.io/etcd/server/v3/storage/datadir"
+ "github.com/ls-2018/etcd_cn/etcd/datadir"
+ "github.com/ls-2018/etcd_cn/etcdutl/snapshot"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
"github.com/spf13/cobra"
)
@@ -46,26 +46,24 @@ func NewSnapshotCommand() *cobra.Command {
Use: "snapshot ",
Short: "Manages etcd node snapshots",
}
- cmd.AddCommand(NewSnapshotRestoreCommand())
- cmd.AddCommand(newSnapshotStatusCommand())
+ cmd.AddCommand(NewSnapshotRestoreCommand()) // restore
+ cmd.AddCommand(newSnapshotStatusCommand()) // status
return cmd
}
func newSnapshotStatusCommand() *cobra.Command {
return &cobra.Command{
Use: "status ",
- Short: "Gets backend snapshot status of a given file",
- Long: `When --write-out is set to simple, this command prints out comma-separated status lists for each endpoint.
-The items in the lists are hash, revision, total keys, total size.
-`,
- Run: SnapshotStatusCommandFunc,
+		Short: "Gets the snapshot status from a given file",
+ Long: ``,
+ Run: SnapshotStatusCommandFunc,
}
}
func NewSnapshotRestoreCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "restore --data-dir {output dir} [options]",
- Short: "Restores an etcd member snapshot to an etcd directory",
+		Short: "Restores an etcd member snapshot to an etcd directory",
Run: snapshotRestoreCommandFunc,
}
cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "Path to the output data directory")
@@ -76,8 +74,7 @@ func NewSnapshotRestoreCommand() *cobra.Command {
cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member")
cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
- cmd.MarkFlagDirname("data-dir")
- cmd.MarkFlagDirname("wal-dir")
+ cmd.MarkFlagRequired("data-dir")
return cmd
}
@@ -99,8 +96,7 @@ func SnapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
}
func snapshotRestoreCommandFunc(_ *cobra.Command, args []string) {
- SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir,
- restorePeerURLs, restoreName, skipHashCheck, args)
+ SnapshotRestoreCommandFunc(restoreCluster, restoreClusterToken, restoreDataDir, restoreWalDir, restorePeerURLs, restoreName, skipHashCheck, args)
}
func SnapshotRestoreCommandFunc(restoreCluster string,
@@ -110,7 +106,8 @@ func SnapshotRestoreCommandFunc(restoreCluster string,
restorePeerURLs string,
restoreName string,
skipHashCheck bool,
- args []string) {
+ args []string,
+) {
if len(args) != 1 {
err := fmt.Errorf("snapshot restore requires exactly one argument")
cobrautl.ExitWithError(cobrautl.ExitBadArgs, err)
diff --git a/etcdutl/etcdutl/version_command.go b/etcdutl/etcdutl/version_command.go
deleted file mode 100644
index 1cb1a146b4b..00000000000
--- a/etcdutl/etcdutl/version_command.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2021 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdutl
-
-import (
- "fmt"
-
- "go.etcd.io/etcd/api/v3/version"
-
- "github.com/spf13/cobra"
-)
-
-// NewVersionCommand prints out the version of etcd.
-func NewVersionCommand() *cobra.Command {
- return &cobra.Command{
- Use: "version",
- Short: "Prints the version of etcdutl",
- Run: versionCommandFunc,
- }
-}
-
-func versionCommandFunc(cmd *cobra.Command, args []string) {
- fmt.Println("etcdutl version:", version.Version)
- fmt.Println("API version:", version.APIVersion)
-}
diff --git a/etcdutl/go.mod b/etcdutl/go.mod
deleted file mode 100644
index 02254541fe9..00000000000
--- a/etcdutl/go.mod
+++ /dev/null
@@ -1,75 +0,0 @@
-module go.etcd.io/etcd/etcdutl/v3
-
-go 1.19
-
-replace (
- go.etcd.io/etcd/api/v3 => ../api
- go.etcd.io/etcd/client/pkg/v3 => ../client/pkg
- go.etcd.io/etcd/client/v2 => ../client/v2
- go.etcd.io/etcd/client/v3 => ../client/v3
- go.etcd.io/etcd/pkg/v3 => ../pkg
- go.etcd.io/etcd/server/v3 => ../server
-)
-
-// Bad imports are sometimes causing attempts to pull that code.
-// This makes the error more explicit.
-replace (
- go.etcd.io/etcd => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/etcd/v3 => ./FORBIDDEN_DEPENDENCY
- go.etcd.io/tests/v3 => ./FORBIDDEN_DEPENDENCY
-)
-
-require (
- github.com/coreos/go-semver v0.3.1
- github.com/dustin/go-humanize v1.0.1
- github.com/olekukonko/tablewriter v0.0.5
- github.com/spf13/cobra v1.6.1
- go.etcd.io/bbolt v1.3.7
- go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/server/v3 v3.6.0-alpha.0
- go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a
- go.uber.org/zap v1.24.0
-)
-
-require (
- github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
- github.com/go-logr/stdr v1.2.2 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang-jwt/jwt/v4 v4.4.3 // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/btree v1.1.2 // indirect
- github.com/inconshreveable/mousetrap v1.0.1 // indirect
- github.com/jonboulle/clockwork v0.3.0 // indirect
- github.com/mattn/go-runewidth v0.0.9 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/prometheus/client_golang v1.14.0 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
- github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
- go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 // indirect
- go.opentelemetry.io/otel v1.11.2 // indirect
- go.opentelemetry.io/otel/metric v0.34.0 // indirect
- go.opentelemetry.io/otel/trace v1.11.2 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/goleak v1.1.12 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
- golang.org/x/net v0.5.0 // indirect
- golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb // indirect
- golang.org/x/sys v0.4.0 // indirect
- golang.org/x/text v0.6.0 // indirect
- golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
- google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect
- google.golang.org/grpc v1.51.0 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
- sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
-)
diff --git a/etcdutl/go.sum b/etcdutl/go.sum
deleted file mode 100644
index 0ffc93d9e20..00000000000
--- a/etcdutl/go.sum
+++ /dev/null
@@ -1,599 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
-github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU=
-github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg=
-github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
-github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
-go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
-go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a h1:Znv2XJyAf/fsJsFNt9toO8uyXwwHQ44wxqsvdSxipj4=
-go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a/go.mod h1:eMshmuwXLWZrjHXN8ZgYrOMQRSbHqi5M84DEZWhG+o4=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc=
-go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
-go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
-go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8=
-go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
-go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
-go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g=
-golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
diff --git a/etcdutl/main.go b/etcdutl/main.go
index bff0b1d869b..3f94e441b4b 100644
--- a/etcdutl/main.go
+++ b/etcdutl/main.go
@@ -16,7 +16,7 @@
package main
import (
- "go.etcd.io/etcd/pkg/v3/cobrautl"
+ "github.com/ls-2018/etcd_cn/pkg/cobrautl"
)
func main() {
@@ -24,3 +24,7 @@ func main() {
cobrautl.ExitWithError(cobrautl.ExitError, err)
}
}
+
+// snapshot status ../default.etcd/member/snap/bolt.db --write-out=table
+// snapshot restore ./123
+// defrag --data-dir ../default.etcd
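+// (The lines above are example invocations of this etcdutl binary: `snapshot status` inspects the bbolt file, `snapshot restore` rebuilds a data directory from it, and `defrag` compacts the backend in place.)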
diff --git a/etcdutl/main_test.go b/etcdutl/main_test.go
deleted file mode 100644
index 1fe58afb837..00000000000
--- a/etcdutl/main_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "log"
- "os"
- "strings"
- "testing"
-)
-
-func SplitTestArgs(args []string) (testArgs, appArgs []string) {
- for i, arg := range args {
- switch {
- case strings.HasPrefix(arg, "-test."):
- testArgs = append(testArgs, arg)
- case i == 0:
- appArgs = append(appArgs, arg)
- testArgs = append(testArgs, arg)
- default:
- appArgs = append(appArgs, arg)
- }
- }
- return
-}
-
-// TestEmpty is empty test to avoid no-tests warning.
-func TestEmpty(t *testing.T) {}
-
-/**
- * The purpose of this "test" is to run etcdctl with code-coverage
- * collection turned on. The technique is documented here:
- *
- * https://www.cyphar.com/blog/post/20170412-golang-integration-coverage
- */
-func TestMain(m *testing.M) {
- // don't launch etcdutl when invoked via go test
- if strings.HasSuffix(os.Args[0], "etcdutl.test") {
- return
- }
-
- testArgs, appArgs := SplitTestArgs(os.Args)
-
- os.Args = appArgs
-
- err := Start()
- if err != nil {
- log.Fatalf("etcdctl failed with: %v", err)
- }
-
- // This will generate coverage files:
- os.Args = testArgs
- m.Run()
-}
diff --git a/etcdutl/over_ctl.go b/etcdutl/over_ctl.go
new file mode 100644
index 00000000000..8463b8fa3dc
--- /dev/null
+++ b/etcdutl/over_ctl.go
@@ -0,0 +1,51 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Command etcdutl is the entry point for the etcdutl administrative command line tool.
+package main
+
+import (
+ "github.com/ls-2018/etcd_cn/etcdutl/etcdutl"
+ "github.com/spf13/cobra"
+)
+
+const (
+ cliName = "etcdutl"
+ cliDescription = "An administrative command line tool for etcd3."
+)
+
+var rootCmd = &cobra.Command{
+ Use: cliName,
+ Short: cliDescription,
+ SuggestFor: []string{"etcdutl"},
+}
+
+func init() {
+ rootCmd.PersistentFlags().StringVarP(&etcdutl.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)")
+
+ rootCmd.AddCommand(
+		etcdutl.NewBackupCommand(),   // back up a data directory
+		etcdutl.NewDefragCommand(),   // defragment the backend storage to reclaim space
+		etcdutl.NewSnapshotCommand(), // snapshot status / restore
+ )
+}
+
+func Start() error {
+ rootCmd.SetHelpTemplate(`{{.UsageString}}`)
+ return rootCmd.Execute()
+}
+
+func init() {
+ cobra.EnablePrefixMatching = true
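+	// prefix matching lets abbreviated commands such as `etcdutl snap status` resolve to `snapshot status`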
+}
diff --git a/etcdutl/snapshot/doc.go b/etcdutl/snapshot/doc.go
deleted file mode 100644
index 1c761be70d1..00000000000
--- a/etcdutl/snapshot/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package snapshot implements utilities around etcd snapshot.
-package snapshot
diff --git a/etcdutl/snapshot/over_v3_snapshot.go b/etcdutl/snapshot/over_v3_snapshot.go
new file mode 100644
index 00000000000..bebcb4c259b
--- /dev/null
+++ b/etcdutl/snapshot/over_v3_snapshot.go
@@ -0,0 +1,460 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snapshot
+
+import (
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "github.com/ls-2018/etcd_cn/raft"
+
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/fileutil"
+ "github.com/ls-2018/etcd_cn/client_sdk/pkg/types"
+ "github.com/ls-2018/etcd_cn/etcd/config"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/membership"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/snap"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/api/v2store"
+ "github.com/ls-2018/etcd_cn/etcd/etcdserver/cindex"
+ "github.com/ls-2018/etcd_cn/etcd/mvcc/backend"
+ "github.com/ls-2018/etcd_cn/etcd/verify"
+ "github.com/ls-2018/etcd_cn/etcd/wal"
+ "github.com/ls-2018/etcd_cn/etcd/wal/walpb"
+ "github.com/ls-2018/etcd_cn/offical/etcdserverpb"
+ "github.com/ls-2018/etcd_cn/raft/raftpb"
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+type Manager interface {
+	Status(dbPath string) (Status, error) // snapshot file information
+ Restore(cfg RestoreConfig) error
+}
+
+// NewV3 returns a snapshot Manager for v3.x snapshots.
+func NewV3(lg *zap.Logger) Manager {
+ if lg == nil {
+ lg = zap.NewExample()
+ }
+ return &v3Manager{lg: lg}
+}
+
+type v3Manager struct {
+ lg *zap.Logger
+
+ name string
+ srcDbPath string
+ walDir string
+ snapDir string
+ cl *membership.RaftCluster
+
+ skipHashCheck bool
+}
+
+func hasChecksum(n int64) bool {
+ return (n % 512) == sha256.Size
+}
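+
+// Note: bbolt files are a multiple of the page size, while the integrity hash is a
+// sha256 digest appended at the very end. As an illustrative example, a 4096-byte db
+// plus the 32-byte digest gives n = 4128, and n%512 == 32 == sha256.Size.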
+
+// RestoreConfig configures snapshot restore operation.
+type RestoreConfig struct {
+ // SnapshotPath is the path of snapshot file to restore from.
+ SnapshotPath string
+
+ // Name is the human-readable name of this member.
+ Name string
+
+ // OutputDataDir is the target data directory to save restored data.
+ // OutputDataDir should not conflict with existing etcd data directory.
+ // If OutputDataDir already exists, it will return an error to prevent
+ // unintended data directory overwrites.
+ // If empty, defaults to "[Name].etcd" if not given.
+ OutputDataDir string
+ // OutputWALDir is the target WAL data directory.
+ // If empty, defaults to "[OutputDataDir]/member/wal" if not given.
+ OutputWALDir string
+
+ // PeerURLs is a list of member's peer URLs to advertise to the rest of the cluster.
+ PeerURLs []string
+
+ // InitialCluster is the initial cluster configuration for restore bootstrap.
+ InitialCluster string
+ // InitialClusterToken is the initial cluster token for etcd cluster during restore bootstrap.
+ InitialClusterToken string
+
+ // SkipHashCheck is "true" to ignore snapshot integrity hash value
+ // (required if copied from data directory).
+ SkipHashCheck bool
+}
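+
+// A minimal illustrative configuration (all values hypothetical):
+//
+//	cfg := RestoreConfig{
+//		SnapshotPath:        "snap.db",
+//		Name:                "m1",
+//		PeerURLs:            []string{"http://127.0.0.1:2380"},
+//		InitialCluster:      "m1=http://127.0.0.1:2380",
+//		InitialClusterToken: "etcd-cluster",
+//	}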
+
+// Restore restores a new etcd data directory from given snapshot file.
+func (s *v3Manager) Restore(cfg RestoreConfig) error {
+ pURLs, err := types.NewURLs(cfg.PeerURLs)
+ if err != nil {
+ return err
+ }
+ var ics types.URLsMap
+ ics, err = types.NewURLsMap(cfg.InitialCluster)
+ if err != nil {
+ return err
+ }
+
+ srv := config.ServerConfig{
+ Logger: s.lg,
+ Name: cfg.Name,
+ PeerURLs: pURLs,
+ InitialPeerURLsMap: ics,
+ InitialClusterToken: cfg.InitialClusterToken,
+ }
+ if err = srv.VerifyBootstrap(); err != nil {
+ return err
+ }
+
+ s.cl, err = membership.NewClusterFromURLsMap(s.lg, cfg.InitialClusterToken, ics)
+ if err != nil {
+ return err
+ }
+
+ dataDir := cfg.OutputDataDir
+ if dataDir == "" {
+ dataDir = cfg.Name + ".etcd"
+ }
+ if fileutil.Exist(dataDir) && !fileutil.DirEmpty(dataDir) {
+ return fmt.Errorf("data-dir %q not empty or could not be read", dataDir)
+ }
+
+ walDir := cfg.OutputWALDir
+ if walDir == "" {
+ walDir = filepath.Join(dataDir, "member", "wal")
+ } else if fileutil.Exist(walDir) {
+ return fmt.Errorf("wal-dir %q exists", walDir)
+ }
+
+ s.name = cfg.Name
+ s.srcDbPath = cfg.SnapshotPath
+ s.walDir = walDir
+ s.snapDir = filepath.Join(dataDir, "member", "snap")
+ s.skipHashCheck = cfg.SkipHashCheck
+
+ s.lg.Info(
+ "restoring snapshot",
+ zap.String("path", s.srcDbPath),
+ zap.String("wal-dir", s.walDir),
+ zap.String("data-dir", dataDir),
+ zap.String("snap-dir", s.snapDir),
+ zap.Stack("stack"),
+ )
+
+ if err = s.saveDB(); err != nil {
+ return err
+ }
+ hardstate, err := s.saveWALAndSnap()
+ if err != nil {
+ return err
+ }
+
+ if err := s.updateCIndex(hardstate.Commit, hardstate.Term); err != nil {
+ return err
+ }
+
+ s.lg.Info(
+ "restored snapshot",
+ zap.String("path", s.srcDbPath),
+ zap.String("wal-dir", s.walDir),
+ zap.String("data-dir", dataDir),
+ zap.String("snap-dir", s.snapDir),
+ )
+
+ return verify.VerifyIfEnabled(verify.Config{
+ ExactIndex: true,
+ Logger: s.lg,
+ DataDir: dataDir,
+ })
+}
+
+func (s *v3Manager) outDbPath() string {
+ return filepath.Join(s.snapDir, "db")
+}
+
+// saveDB copies the database snapshot into the snapshot directory.
+func (s *v3Manager) saveDB() error {
+ err := s.copyAndVerifyDB()
+ if err != nil {
+ return err
+ }
+
+ be := backend.NewDefaultBackend(s.outDbPath())
+ defer be.Close()
+
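+	// membership data copied from the source cluster is trimmed here; the restored
+	// member list is rebuilt from the restore configuration in saveWALAndSnap.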
+ err = membership.TrimMembershipFromBackend(s.lg, be)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *v3Manager) copyAndVerifyDB() error {
+ srcf, ferr := os.Open(s.srcDbPath)
+ if ferr != nil {
+ return ferr
+ }
+ defer srcf.Close()
+
+	// read the snapshot integrity hash appended at the end of the file
+ if _, err := srcf.Seek(-sha256.Size, io.SeekEnd); err != nil {
+ return err
+ }
+ sha := make([]byte, sha256.Size)
+ if _, err := srcf.Read(sha); err != nil {
+ return err
+ }
+ if _, err := srcf.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+
+ if err := fileutil.CreateDirAll(s.snapDir); err != nil {
+ return err
+ }
+
+ outDbPath := s.outDbPath()
+
+ db, dberr := os.OpenFile(outDbPath, os.O_RDWR|os.O_CREATE, 0o600)
+ if dberr != nil {
+ return dberr
+ }
+ dbClosed := false
+ defer func() {
+ if !dbClosed {
+ db.Close()
+ dbClosed = true
+ }
+ }()
+ if _, err := io.Copy(db, srcf); err != nil {
+ return err
+ }
+
+ // truncate away integrity hash, if any.
+ off, serr := db.Seek(0, io.SeekEnd)
+ if serr != nil {
+ return serr
+ }
+ hasHash := hasChecksum(off)
+ if hasHash {
+ if err := db.Truncate(off - sha256.Size); err != nil {
+ return err
+ }
+ }
+
+ if !hasHash && !s.skipHashCheck {
+ return fmt.Errorf("snapshot missing hash but --skip-hash-check=false")
+ }
+
+ if hasHash && !s.skipHashCheck {
+ // check for match
+ if _, err := db.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ h := sha256.New()
+ if _, err := io.Copy(h, db); err != nil {
+ return err
+ }
+ dbsha := h.Sum(nil)
+ if !reflect.DeepEqual(sha, dbsha) {
+ return fmt.Errorf("expected sha256 %v, got %v", sha, dbsha)
+ }
+ }
+
+ // db hash is OK, can now modify DB so it can be part of a new cluster
+ db.Close()
+ return nil
+}
+
+// saveWALAndSnap creates a WAL for the initial cluster
+//
+// TODO: This code ignores learners !!!
+func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
+ if err := fileutil.CreateDirAll(s.walDir); err != nil {
+ return nil, err
+ }
+
+ // add members again to persist them to the store we create.
+ st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
+ s.cl.SetStore(st)
+ be := backend.NewDefaultBackend(s.outDbPath())
+ defer be.Close()
+ s.cl.SetBackend(be)
+ for _, m := range s.cl.Members() {
+ s.cl.AddMember(m, true)
+ }
+
+ m := s.cl.MemberByName(s.name)
+ md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(s.cl.ID())}
+ metadata, merr := md.Marshal()
+ if merr != nil {
+ return nil, merr
+ }
+ w, walerr := wal.Create(s.lg, s.walDir, metadata)
+ if walerr != nil {
+ return nil, walerr
+ }
+ defer w.Close()
+
+ peers := make([]raft.Peer, len(s.cl.MemberIDs()))
+ for i, id := range s.cl.MemberIDs() {
+ ctx, err := json.Marshal((*s.cl).Member(id))
+ if err != nil {
+ return nil, err
+ }
+ peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
+ }
+
+ ents := make([]raftpb.Entry, len(peers))
+ nodeIDs := make([]uint64, len(peers))
+ for i, p := range peers {
+ nodeIDs[i] = p.ID
+ cc := raftpb.ConfChangeV1{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: p.ID,
+ Context: string(p.Context),
+ }
+ d, err := cc.Marshal()
+ if err != nil {
+ return nil, err
+ }
+ ents[i] = raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Term: 1,
+ Index: uint64(i + 1),
+ Data: d, // ok
+ }
+ }
+
+ commit, term := uint64(len(ents)), uint64(1)
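+	// the restored raft log holds exactly one conf-change entry per member, so the
+	// commit index equals the member count and every entry is at term 1.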
+ hardState := raftpb.HardState{
+ Term: term,
+ Vote: peers[0].ID,
+ Commit: commit,
+ }
+ if err := w.Save(hardState, ents); err != nil {
+ return nil, err
+ }
+
+ b, berr := st.Save()
+ if berr != nil {
+ return nil, berr
+ }
+ confState := raftpb.ConfState{
+ Voters: nodeIDs,
+ }
+ raftSnap := raftpb.Snapshot{
+ Data: b,
+ Metadata: raftpb.SnapshotMetadata{
+ Index: commit,
+ Term: term,
+ ConfState: confState,
+ },
+ }
+ sn := snap.New(s.lg, s.snapDir)
+ if err := sn.SaveSnap(raftSnap); err != nil {
+ return nil, err
+ }
+ snapshot := walpb.Snapshot{Index: commit, Term: term, ConfState: &confState}
+ return &hardState, w.SaveSnapshot(snapshot)
+}
+
+func (s *v3Manager) updateCIndex(commit uint64, term uint64) error {
+ be := backend.NewDefaultBackend(s.outDbPath())
+ defer be.Close()
+
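+	// the consistent index records the last raft log index applied to the backend;
+	// aligning it with the snapshot's commit/term keeps the backend and the new WAL consistent.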
+ cindex.UpdateConsistentIndex(be.BatchTx(), commit, term, false)
+ return nil
+}
+
+// ---------------------------------------- OVER ----------------------------------------
+
+type Status struct {
+	Hash      uint32 `json:"hash"`      // hash of the bolt.db contents
+	Revision  int64  `json:"revision"`  // latest revision stored in the snapshot
+	TotalKey  int    `json:"totalKey"`  // total number of keys
+	TotalSize int64  `json:"totalSize"` // actual size of the db file in bytes
+}
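+
+// Illustrative `snapshot status --write-out=json` output:
+//
+//	{"hash":3361512992,"revision":12,"totalKey":10,"totalSize":32768}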
+
+// Status returns information about the bolt.db snapshot file.
+func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
+ if _, err = os.Stat(dbPath); err != nil {
+ return ds, err
+ }
+
+ db, err := bolt.Open(dbPath, 0o400, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ return ds, err
+ }
+ defer db.Close()
+
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+	// read-only transaction
+	// Bolt stores keys in byte-sorted order inside each bucket, which makes sequential iteration over them fast; a cursor is used to walk the keys.
+ if err = db.View(func(tx *bolt.Tx) error {
+		// check the integrity of the snapshot file first
+ var dbErrStrings []string
+ for dbErr := range tx.Check() {
+ dbErrStrings = append(dbErrStrings, dbErr.Error())
+ }
+ if len(dbErrStrings) > 0 {
+ return fmt.Errorf("快照文件完整性检查失败.发现%d错误.\n"+strings.Join(dbErrStrings, "\n"), len(dbErrStrings))
+ }
+ ds.TotalSize = tx.Size()
+ c := tx.Cursor()
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("无法获得桶的哈希值 %s", string(next))
+ }
+ if _, err := h.Write(next); err != nil {
+ return fmt.Errorf("不能写入bucket %s : %v", string(next), err)
+ }
+ iskeyb := string(next) == "key"
+ if err := b.ForEach(func(k, v []byte) error {
+ if _, err := h.Write(k); err != nil {
+ return fmt.Errorf("cannot write to bucket %s", err.Error())
+ }
+ if _, err := h.Write(v); err != nil {
+ return fmt.Errorf("cannot write to bucket %s", err.Error())
+ }
+ if iskeyb {
+ rev := bytesToRev(k)
+ ds.Revision = rev.main
+ }
+ ds.TotalKey++
+ return nil
+ }); err != nil {
+ return fmt.Errorf("不能写入bucket %s : %v", string(next), err)
+ }
+ }
+ return nil
+ }); err != nil {
+ return ds, err
+ }
+
+ ds.Hash = h.Sum32()
+ return ds, nil
+}
diff --git a/etcdutl/snapshot/util.go b/etcdutl/snapshot/util.go
index 2c1fae21fa1..8722c418077 100644
--- a/etcdutl/snapshot/util.go
+++ b/etcdutl/snapshot/util.go
@@ -19,8 +19,8 @@ import (
)
type revision struct {
- main int64
- sub int64
+	main int64 // globally increasing primary revision; it is incremented by every put/txn/delete transaction, and all keys written within one transaction share the same main revision
+	sub int64  // sub-revision within a transaction; it starts at 0 and is incremented by each put/delete inside that transaction
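+	// Illustrative example: a txn writing two keys at main revision 5 stores them as
+	// {main:5, sub:0} and {main:5, sub:1}; the next standalone put becomes {main:6, sub:0}.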
}
func bytesToRev(bytes []byte) revision {
diff --git a/etcdutl/snapshot/v3_snapshot.go b/etcdutl/snapshot/v3_snapshot.go
deleted file mode 100644
index 8958ba80da1..00000000000
--- a/etcdutl/snapshot/v3_snapshot.go
+++ /dev/null
@@ -1,489 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package snapshot
-
-import (
- "context"
- "crypto/sha256"
- "encoding/json"
- "fmt"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "reflect"
- "strings"
-
- "go.uber.org/zap"
-
- bolt "go.etcd.io/bbolt"
- "go.etcd.io/etcd/api/v3/etcdserverpb"
- "go.etcd.io/etcd/client/pkg/v3/fileutil"
- "go.etcd.io/etcd/client/pkg/v3/types"
- clientv3 "go.etcd.io/etcd/client/v3"
- "go.etcd.io/etcd/client/v3/snapshot"
- "go.etcd.io/etcd/server/v3/config"
- "go.etcd.io/etcd/server/v3/etcdserver"
- "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
- "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
- "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
- "go.etcd.io/etcd/server/v3/etcdserver/cindex"
- "go.etcd.io/etcd/server/v3/storage/backend"
- "go.etcd.io/etcd/server/v3/storage/schema"
- "go.etcd.io/etcd/server/v3/storage/wal"
- "go.etcd.io/etcd/server/v3/storage/wal/walpb"
- "go.etcd.io/etcd/server/v3/verify"
- "go.etcd.io/raft/v3"
- "go.etcd.io/raft/v3/raftpb"
-)
-
-// Manager defines snapshot methods.
-type Manager interface {
- // Save fetches snapshot from remote etcd server, saves data
- // to target path and returns server version. If the context "ctx" is canceled or timed out,
- // snapshot save stream will error out (e.g. context.Canceled,
- // context.DeadlineExceeded). Make sure to specify only one endpoint
- // in client configuration. Snapshot API must be requested to a
- // selected node, and saved snapshot is the point-in-time state of
- // the selected node.
- Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error)
-
- // Status returns the snapshot file information.
- Status(dbPath string) (Status, error)
-
- // Restore restores a new etcd data directory from given snapshot
- // file. It returns an error if specified data directory already
- // exists, to prevent unintended data directory overwrites.
- Restore(cfg RestoreConfig) error
-}
-
-// NewV3 returns a new snapshot Manager for v3.x snapshot.
-func NewV3(lg *zap.Logger) Manager {
- return &v3Manager{lg: lg}
-}
-
-type v3Manager struct {
- lg *zap.Logger
-
- name string
- srcDbPath string
- walDir string
- snapDir string
- cl *membership.RaftCluster
-
- skipHashCheck bool
-}
-
-// hasChecksum returns "true" if the file size "n"
-// has appended sha256 hash digest.
-func hasChecksum(n int64) bool {
- // 512 is chosen because it's a minimum disk sector size
- // smaller than (and multiplies to) OS page size in most systems
- return (n % 512) == sha256.Size
-}
-
-// Save fetches snapshot from remote etcd server and saves data to target path.
-func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error) {
- return snapshot.SaveWithVersion(ctx, s.lg, cfg, dbPath)
-}
-
-// Status is the snapshot file status.
-type Status struct {
- Hash uint32 `json:"hash"`
- Revision int64 `json:"revision"`
- TotalKey int `json:"totalKey"`
- TotalSize int64 `json:"totalSize"`
- // Version is equal to storageVersion of the snapshot
-	// Empty if server does not supports versioned snapshots (<v3.6)
-	Version string `json:"version"`
-}
-
-// Status returns the snapshot file information.
-func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
-	if _, err = os.Stat(dbPath); err != nil {
-		return ds, err
-	}
-
-	db, err := bolt.Open(dbPath, 0400, &bolt.Options{ReadOnly: true})
-	if err != nil {
-		return ds, err
-	}
-	defer db.Close()
-
-	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
-	if err = db.View(func(tx *bolt.Tx) error {
-		// check snapshot file integrity first
-		var dbErrStrings []string
-		for dbErr := range tx.Check() {
-			dbErrStrings = append(dbErrStrings, dbErr.Error())
-		}
-		if len(dbErrStrings) > 0 {
- return fmt.Errorf("snapshot file integrity check failed. %d errors found.\n"+strings.Join(dbErrStrings, "\n"), len(dbErrStrings))
- }
- ds.TotalSize = tx.Size()
- v := schema.ReadStorageVersionFromSnapshot(tx)
- if v != nil {
- ds.Version = v.String()
- }
- c := tx.Cursor()
- for next, _ := c.First(); next != nil; next, _ = c.Next() {
- b := tx.Bucket(next)
- if b == nil {
- return fmt.Errorf("cannot get hash of bucket %s", string(next))
- }
- if _, err := h.Write(next); err != nil {
- return fmt.Errorf("cannot write bucket %s : %v", string(next), err)
- }
- iskeyb := (string(next) == "key")
- if err := b.ForEach(func(k, v []byte) error {
- if _, err := h.Write(k); err != nil {
- return fmt.Errorf("cannot write to bucket %s", err.Error())
- }
- if _, err := h.Write(v); err != nil {
- return fmt.Errorf("cannot write to bucket %s", err.Error())
- }
- if iskeyb {
- rev := bytesToRev(k)
- ds.Revision = rev.main
- }
- ds.TotalKey++
- return nil
- }); err != nil {
- return fmt.Errorf("cannot write bucket %s : %v", string(next), err)
- }
- }
- return nil
- }); err != nil {
- return ds, err
- }
-
- ds.Hash = h.Sum32()
- return ds, nil
-}
-
-// RestoreConfig configures snapshot restore operation.
-type RestoreConfig struct {
- // SnapshotPath is the path of snapshot file to restore from.
- SnapshotPath string
-
- // Name is the human-readable name of this member.
- Name string
-
- // OutputDataDir is the target data directory to save restored data.
- // OutputDataDir should not conflict with existing etcd data directory.
- // If OutputDataDir already exists, it will return an error to prevent
- // unintended data directory overwrites.
- // If empty, defaults to "[Name].etcd" if not given.
- OutputDataDir string
- // OutputWALDir is the target WAL data directory.
- // If empty, defaults to "[OutputDataDir]/member/wal" if not given.
- OutputWALDir string
-
- // PeerURLs is a list of member's peer URLs to advertise to the rest of the cluster.
- PeerURLs []string
-
- // InitialCluster is the initial cluster configuration for restore bootstrap.
- InitialCluster string
- // InitialClusterToken is the initial cluster token for etcd cluster during restore bootstrap.
- InitialClusterToken string
-
- // SkipHashCheck is "true" to ignore snapshot integrity hash value
- // (required if copied from data directory).
- SkipHashCheck bool
-}
-
-// Restore restores a new etcd data directory from given snapshot file.
-func (s *v3Manager) Restore(cfg RestoreConfig) error {
- pURLs, err := types.NewURLs(cfg.PeerURLs)
- if err != nil {
- return err
- }
- var ics types.URLsMap
- ics, err = types.NewURLsMap(cfg.InitialCluster)
- if err != nil {
- return err
- }
-
- srv := config.ServerConfig{
- Logger: s.lg,
- Name: cfg.Name,
- PeerURLs: pURLs,
- InitialPeerURLsMap: ics,
- InitialClusterToken: cfg.InitialClusterToken,
- }
- if err = srv.VerifyBootstrap(); err != nil {
- return err
- }
-
- s.cl, err = membership.NewClusterFromURLsMap(s.lg, cfg.InitialClusterToken, ics)
- if err != nil {
- return err
- }
-
- dataDir := cfg.OutputDataDir
- if dataDir == "" {
- dataDir = cfg.Name + ".etcd"
- }
- if fileutil.Exist(dataDir) && !fileutil.DirEmpty(dataDir) {
- return fmt.Errorf("data-dir %q not empty or could not be read", dataDir)
- }
-
- walDir := cfg.OutputWALDir
- if walDir == "" {
- walDir = filepath.Join(dataDir, "member", "wal")
- } else if fileutil.Exist(walDir) {
- return fmt.Errorf("wal-dir %q exists", walDir)
- }
-
- s.name = cfg.Name
- s.srcDbPath = cfg.SnapshotPath
- s.walDir = walDir
- s.snapDir = filepath.Join(dataDir, "member", "snap")
- s.skipHashCheck = cfg.SkipHashCheck
-
- s.lg.Info(
- "restoring snapshot",
- zap.String("path", s.srcDbPath),
- zap.String("wal-dir", s.walDir),
- zap.String("data-dir", dataDir),
- zap.String("snap-dir", s.snapDir),
- )
-
- if err = s.saveDB(); err != nil {
- return err
- }
- hardstate, err := s.saveWALAndSnap()
- if err != nil {
- return err
- }
-
- if err := s.updateCIndex(hardstate.Commit, hardstate.Term); err != nil {
- return err
- }
-
- s.lg.Info(
- "restored snapshot",
- zap.String("path", s.srcDbPath),
- zap.String("wal-dir", s.walDir),
- zap.String("data-dir", dataDir),
- zap.String("snap-dir", s.snapDir),
- )
-
- return verify.VerifyIfEnabled(verify.Config{
- ExactIndex: true,
- Logger: s.lg,
- DataDir: dataDir,
- })
-}
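For context, Restore is normally reached through the snapshot Manager defined in this package (upstream import path go.etcd.io/etcd/etcdutl/v3/snapshot; the fork relocates the package, so the import would need adjusting). A minimal sketch of driving a single-member restore, assuming the exported NewV3 constructor and the RestoreConfig shown above; paths, names and URLs are illustrative:

package main

import (
	"log"

	"go.etcd.io/etcd/etcdutl/v3/snapshot"
	"go.uber.org/zap"
)

func main() {
	lg := zap.NewExample()
	sp := snapshot.NewV3(lg)

	// Restore "snapshot.db" into a fresh single-member data directory.
	err := sp.Restore(snapshot.RestoreConfig{
		SnapshotPath:        "snapshot.db",
		Name:                "m1",
		OutputDataDir:       "m1.etcd",
		PeerURLs:            []string{"http://127.0.0.1:2380"},
		InitialCluster:      "m1=http://127.0.0.1:2380",
		InitialClusterToken: "etcd-cluster-1",
		SkipHashCheck:       false, // only skip for db files copied straight out of a data dir
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("restore complete; start the member with --data-dir=m1.etcd")
}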
-
-func (s *v3Manager) outDbPath() string {
- return filepath.Join(s.snapDir, "db")
-}
-
-// saveDB copies the database snapshot to the snapshot directory
-func (s *v3Manager) saveDB() error {
- err := s.copyAndVerifyDB()
- if err != nil {
- return err
- }
-
- be := backend.NewDefaultBackend(s.lg, s.outDbPath())
- defer be.Close()
-
- err = schema.NewMembershipBackend(s.lg, be).TrimMembershipFromBackend()
- if err != nil {
- return err
- }
-
- return nil
-}
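saveDB copies the snapshot into place and then strips the old cluster's membership records so the restored backend does not resurrect stale members. Conceptually the trim amounts to clearing the membership buckets; a rough illustration with bare bbolt follows, assuming the conventional "members" and "members_removed" bucket names (the real code goes through schema.NewMembershipBackend rather than touching buckets directly).

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

// trimMembership clears the membership buckets of a copied db file so the
// restored member does not inherit the old cluster's member list.
// Rough sketch; the real restore path uses the membership backend API.
func trimMembership(path string) error {
	db, err := bolt.Open(path, 0600, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	return db.Update(func(tx *bolt.Tx) error {
		for _, name := range []string{"members", "members_removed"} {
			b := tx.Bucket([]byte(name))
			if b == nil {
				continue // layout differs across versions; nothing to trim
			}
			// collect keys first: bbolt forbids modifying a bucket inside ForEach
			var keys [][]byte
			if err := b.ForEach(func(k, _ []byte) error {
				keys = append(keys, append([]byte(nil), k...))
				return nil
			}); err != nil {
				return err
			}
			for _, k := range keys {
				if err := b.Delete(k); err != nil {
					return err
				}
			}
		}
		return nil
	})
}

func main() {
	if err := trimMembership("m1.etcd/member/snap/db"); err != nil {
		log.Fatal(err)
	}
}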
-
-func (s *v3Manager) copyAndVerifyDB() error {
- srcf, ferr := os.Open(s.srcDbPath)
- if ferr != nil {
- return ferr
- }
- defer srcf.Close()
-
- // get snapshot integrity hash
- if _, err := srcf.Seek(-sha256.Size, io.SeekEnd); err != nil {
- return err
- }
- sha := make([]byte, sha256.Size)
- if _, err := srcf.Read(sha); err != nil {
- return err
- }
- if _, err := srcf.Seek(0, io.SeekStart); err != nil {
- return err
- }
-
- if err := fileutil.CreateDirAll(s.lg, s.snapDir); err != nil {
- return err
- }
-
- outDbPath := s.outDbPath()
-
- db, dberr := os.OpenFile(outDbPath, os.O_RDWR|os.O_CREATE, 0600)
- if dberr != nil {
- return dberr
- }
- dbClosed := false
- defer func() {
- if !dbClosed {
- db.Close()
- dbClosed = true
- }
- }()
- if _, err := io.Copy(db, srcf); err != nil {
- return err
- }
-
- // truncate away integrity hash, if any.
- off, serr := db.Seek(0, io.SeekEnd)
- if serr != nil {
- return serr
- }
- hasHash := hasChecksum(off)
- if hasHash {
- if err := db.Truncate(off - sha256.Size); err != nil {
- return err
- }
- }
-
- if !hasHash && !s.skipHashCheck {
- return fmt.Errorf("snapshot missing hash but --skip-hash-check=false")
- }
-
- if hasHash && !s.skipHashCheck {
- // check for match
- if _, err := db.Seek(0, io.SeekStart); err != nil {
- return err
- }
- h := sha256.New()
- if _, err := io.Copy(h, db); err != nil {
- return err
- }
- dbsha := h.Sum(nil)
- if !reflect.DeepEqual(sha, dbsha) {
- return fmt.Errorf("expected sha256 %v, got %v", sha, dbsha)
- }
- }
-
- // db hash is OK, can now modify DB so it can be part of a new cluster
- db.Close()
- return nil
-}
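copyAndVerifyDB relies on the layout produced by the v3 snapshot save path: the boltdb payload followed by a trailing sha256 of everything before it. A small standalone check of that trailer, under the same layout assumption; unlike the streaming code above, this sketch simply loads the whole file into memory.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"log"
	"os"
)

// verifySnapshotTrailer checks that the last sha256.Size bytes of the file
// match the sha256 of everything that precedes them.
func verifySnapshotTrailer(path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	if len(data) <= sha256.Size {
		return fmt.Errorf("%s too small to carry a sha256 trailer", path)
	}
	body, trailer := data[:len(data)-sha256.Size], data[len(data)-sha256.Size:]
	sum := sha256.Sum256(body)
	if !bytes.Equal(sum[:], trailer) {
		return fmt.Errorf("sha256 mismatch: expected %x, got %x", trailer, sum)
	}
	return nil
}

func main() {
	if err := verifySnapshotTrailer("snapshot.db"); err != nil {
		log.Fatal(err)
	}
	log.Println("snapshot integrity hash OK")
}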
-
-// saveWALAndSnap creates a WAL for the initial cluster
-//
-// TODO: This code ignores learners !!!
-func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
- if err := fileutil.CreateDirAll(s.lg, s.walDir); err != nil {
- return nil, err
- }
-
- // add members again to persist them to the store we create.
- st := v2store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
- s.cl.SetStore(st)
- be := backend.NewDefaultBackend(s.lg, s.outDbPath())
- defer be.Close()
- s.cl.SetBackend(schema.NewMembershipBackend(s.lg, be))
- for _, m := range s.cl.Members() {
- s.cl.AddMember(m, true)
- }
-
- m := s.cl.MemberByName(s.name)
- md := &etcdserverpb.Metadata{NodeID: uint64(m.ID), ClusterID: uint64(s.cl.ID())}
- metadata, merr := md.Marshal()
- if merr != nil {
- return nil, merr
- }
- w, walerr := wal.Create(s.lg, s.walDir, metadata)
- if walerr != nil {
- return nil, walerr
- }
- defer w.Close()
-
- peers := make([]raft.Peer, len(s.cl.MemberIDs()))
- for i, id := range s.cl.MemberIDs() {
- ctx, err := json.Marshal((*s.cl).Member(id))
- if err != nil {
- return nil, err
- }
- peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
- }
-
- ents := make([]raftpb.Entry, len(peers))
- nodeIDs := make([]uint64, len(peers))
- for i, p := range peers {
- nodeIDs[i] = p.ID
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: p.ID,
- Context: p.Context,
- }
- d, err := cc.Marshal()
- if err != nil {
- return nil, err
- }
- ents[i] = raftpb.Entry{
- Type: raftpb.EntryConfChange,
- Term: 1,
- Index: uint64(i + 1),
- Data: d,
- }
- }
-
- commit, term := uint64(len(ents)), uint64(1)
- hardState := raftpb.HardState{
- Term: term,
- Vote: peers[0].ID,
- Commit: commit,
- }
- if err := w.Save(hardState, ents); err != nil {
- return nil, err
- }
-
- b, berr := st.Save()
- if berr != nil {
- return nil, berr
- }
- confState := raftpb.ConfState{
- Voters: nodeIDs,
- }
- raftSnap := raftpb.Snapshot{
- Data: b,
- Metadata: raftpb.SnapshotMetadata{
- Index: commit,
- Term: term,
- ConfState: confState,
- },
- }
- sn := snap.New(s.lg, s.snapDir)
- if err := sn.SaveSnap(raftSnap); err != nil {
- return nil, err
- }
- snapshot := walpb.Snapshot{Index: commit, Term: term, ConfState: &confState}
- return &hardState, w.SaveSnapshot(snapshot)
-}
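saveWALAndSnap fabricates the raft bootstrap state for the restored member: one ConfChangeAddNode entry per peer, a HardState whose Commit covers those entries, and a matching snapshot record. One way to sanity-check the result is to reopen the WAL it wrote and confirm the hard state and entry count; a sketch assuming the same wal, walpb and zap imports already used by this file:

// checkRestoredWAL reopens the WAL written by saveWALAndSnap and logs the
// bootstrap state it contains. Sketch only; error handling kept minimal.
func checkRestoredWAL(lg *zap.Logger, walDir string) error {
	w, err := wal.OpenForRead(lg, walDir, walpb.Snapshot{})
	if err != nil {
		return err
	}
	defer w.Close()

	_, hs, ents, err := w.ReadAll()
	if err != nil {
		return err
	}
	lg.Info("restored WAL contents",
		zap.Uint64("commit", hs.Commit),
		zap.Uint64("term", hs.Term),
		zap.Int("entries", len(ents)),
	)
	return nil
}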
-
-func (s *v3Manager) updateCIndex(commit uint64, term uint64) error {
- be := backend.NewDefaultBackend(s.lg, s.outDbPath())
- defer be.Close()
-
- cindex.UpdateConsistentIndexForce(be.BatchTx(), commit, term)
- return nil
-}
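updateCIndex stamps the restored backend with the commit index and term of the synthetic WAL so the server's consistent index matches what the WAL claims was applied. In the storage schema that value lives in the meta bucket; a rough read-back sketch with bare bbolt, where the "meta" bucket and the "consistent_index"/"term" keys are assumptions to be checked against the schema package, and the bolt, encoding/binary and fmt imports come from the earlier sketches:

// readConsistentIndex reads the consistent index and term a restore stamped
// into member/snap/db. Bucket and key names are assumptions, not the schema API.
func readConsistentIndex(path string) (index, term uint64, err error) {
	db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		return 0, 0, err
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		meta := tx.Bucket([]byte("meta"))
		if meta == nil {
			return fmt.Errorf("no meta bucket in %s", path)
		}
		if v := meta.Get([]byte("consistent_index")); len(v) == 8 {
			index = binary.BigEndian.Uint64(v)
		}
		if v := meta.Get([]byte("term")); len(v) == 8 {
			term = binary.BigEndian.Uint64(v)
		}
		return nil
	})
	return index, term, err
}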
diff --git a/go.mod b/go.mod
index f37399e5b32..317971ae6a4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,104 +1,76 @@
-module go.etcd.io/etcd/v3
+module github.com/ls-2018/etcd_cn
-go 1.19
+go 1.16
-replace (
- go.etcd.io/etcd/api/v3 => ./api
- go.etcd.io/etcd/client/pkg/v3 => ./client/pkg
- go.etcd.io/etcd/client/v2 => ./client/v2
- go.etcd.io/etcd/client/v3 => ./client/v3
- go.etcd.io/etcd/etcdctl/v3 => ./etcdctl
- go.etcd.io/etcd/etcdutl/v3 => ./etcdutl
- go.etcd.io/etcd/pkg/v3 => ./pkg
- go.etcd.io/etcd/server/v3 => ./server
- go.etcd.io/etcd/tests/v3 => ./tests
-)
+replace go.etcd.io/etcd/api/v3 v3.5.2 => github.com/etcd-io/etcd/api/v3 v3.5.2
-require (
- github.com/bgentry/speakeasy v0.1.0
- github.com/cheggaaa/pb/v3 v3.1.0
- github.com/coreos/go-semver v0.3.1
- github.com/dustin/go-humanize v1.0.1
- github.com/spf13/cobra v1.6.1
- github.com/stretchr/testify v1.8.1
- go.etcd.io/bbolt v1.3.7
- go.etcd.io/etcd/api/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/client/v2 v2.306.0-alpha.0
- go.etcd.io/etcd/client/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/server/v3 v3.6.0-alpha.0
- go.etcd.io/etcd/tests/v3 v3.6.0-alpha.0
- go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a
- go.uber.org/zap v1.24.0
- golang.org/x/time v0.0.0-20220609170525-579cf78fd858
- google.golang.org/grpc v1.51.0
- google.golang.org/protobuf v1.28.1
-)
+replace github.com/etcd-io/etcd/api/v3 v3.5.2 => ./offical/api/v3
+
+replace go.etcd.io/etcd/client/v3 v3.5.2 => github.com/etcd-io/etcd/client/v3 v3.5.2
+replace github.com/etcd-io/client/api/v3 v3.5.2 => ./offical/client/v3
+replace github.com/ls-2018/etcd_cn/official => ./offical
require (
- cloud.google.com/go v0.81.0 // indirect
- github.com/VividCortex/ewma v1.1.1 // indirect
- github.com/benbjohnson/clock v1.1.0 // indirect
- github.com/beorn7/perks v1.0.1 // indirect
- github.com/cenkalti/backoff/v4 v4.2.0 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
- github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/fatih/color v1.13.0 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
- github.com/go-logr/stdr v1.2.2 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang-jwt/jwt/v4 v4.4.3 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.2 // indirect
- github.com/google/btree v1.1.2 // indirect
- github.com/google/go-cmp v0.5.9 // indirect
- github.com/gorilla/websocket v1.4.2 // indirect
- github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
- github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
- github.com/inconshreveable/mousetrap v1.0.1 // indirect
- github.com/jonboulle/clockwork v0.3.0 // indirect
+ github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 // indirect
+ github.com/alexkohler/nakedret v1.0.0
+ github.com/bgentry/speakeasy v0.1.0
+ github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03
+ github.com/coreos/go-semver v0.3.0
+ github.com/coreos/go-systemd/v22 v22.3.2
+ github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e
+ github.com/creack/pty v1.1.11
+ github.com/dustin/go-humanize v1.0.0
+ github.com/fatih/color v1.10.0 // indirect
+ github.com/form3tech-oss/jwt-go v3.2.3+incompatible
+ github.com/go-openapi/loads v0.19.5 // indirect
+ github.com/go-openapi/spec v0.19.9 // indirect
+ github.com/gogo/protobuf v1.3.2
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
+ github.com/golang/protobuf v1.5.2
+ github.com/google/btree v1.0.1
+ github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78
+ github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
+ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
+ github.com/grpc-ecosystem/grpc-gateway v1.16.0
+ github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535
+ github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa
+ github.com/jonboulle/clockwork v0.2.2
+ github.com/json-iterator/go v1.1.11
+ github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
- github.com/mattn/go-isatty v0.0.14 // indirect
- github.com/mattn/go-runewidth v0.0.12 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/olekukonko/tablewriter v0.0.5 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_golang v1.14.0 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
- github.com/rivo/uniseg v0.2.0 // indirect
- github.com/sirupsen/logrus v1.8.1 // indirect
- github.com/soheilhy/cmux v0.1.5 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
- github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect
- github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 // indirect
- go.opentelemetry.io/otel v1.11.2 // indirect
- go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 // indirect
- go.opentelemetry.io/otel/metric v0.34.0 // indirect
- go.opentelemetry.io/otel/sdk v1.11.2 // indirect
- go.opentelemetry.io/otel/trace v1.11.2 // indirect
- go.opentelemetry.io/proto/otlp v0.19.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
- golang.org/x/net v0.5.0 // indirect
- golang.org/x/sys v0.4.0 // indirect
- golang.org/x/text v0.6.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect
- gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
- sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
- sigs.k8s.io/yaml v1.3.0 // indirect
+ github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f
+ github.com/mgechev/revive v1.0.2
+ github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735
+ github.com/modern-go/reflect2 v1.0.1
+ github.com/olekukonko/tablewriter v0.0.5
+ github.com/prometheus/client_golang v1.11.0
+ github.com/rogpeppe/go-internal v1.8.1 // indirect
+ github.com/sirupsen/logrus v1.7.0 // indirect
+ github.com/soheilhy/cmux v0.1.5
+ github.com/spf13/cobra v1.1.3
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.7.0
+ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802
+ github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f // indirect
+ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2
+ go.etcd.io/bbolt v1.3.6
+ go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0
+ go.opentelemetry.io/otel v0.20.0
+ go.opentelemetry.io/otel/exporters/otlp v0.20.0
+ go.opentelemetry.io/otel/sdk v0.20.0
+ go.uber.org/multierr v1.6.0
+ go.uber.org/zap v1.17.0
+ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
+ golang.org/x/net v0.0.0-20211008194852-3b03d305991f
+ golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7
+ golang.org/x/text v0.3.7 // indirect
+ golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba
+ google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84
+ google.golang.org/grpc v1.38.0
+ gopkg.in/cheggaaa/pb.v1 v1.0.28
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0
+ honnef.co/go/tools v0.0.1-2019.2.3
+ mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7
+ sigs.k8s.io/yaml v1.2.0
)
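The rewritten go.mod above leans on replace directives to splice local copies of the upstream API modules into the fork's build. For readers unfamiliar with the mechanism: a replace line only redirects where Go resolves a module path, it does not rewrite import paths in source files, and a filesystem replacement must point at a directory containing its own go.mod. A minimal, purely illustrative example (module names are not taken from this repo):

module example.com/fork

go 1.16

require go.etcd.io/etcd/api/v3 v3.5.2

// Resolve this dependency from a local checkout instead of the module proxy.
replace go.etcd.io/etcd/api/v3 v3.5.2 => ./third_party/etcd-api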
diff --git a/go.sum b/go.sum
index 620de23812b..b7439902a78 100644
--- a/go.sum
+++ b/go.sum
@@ -4,152 +4,188 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
-github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19 h1:bYOD6QJnBJY79MJQR1i9cyQePG5oNDZXDKL2bhN/uvE=
+github.com/akhenakh/hunspellgo v0.0.0-20160221122622-9db38fa26e19/go.mod h1:HcqyLXmWoESd/vPSbCPqvgw5l5cMM5PtoqFOnXLjSeM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexkohler/nakedret v1.0.0 h1:S/bzOFhZHYUJp6qPmdXdFHS5nlWGFmLmoc8QOydvotE=
+github.com/alexkohler/nakedret v1.0.0/go.mod h1:tfDQbtPt67HhBK/6P0yNktIX7peCxfOp0jO9007DrLE=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
-github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04=
-github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03 h1:0wUHjDfbCAROEAZ96zAJGwcNMkPIheFaIjtQyv3QqfM=
+github.com/chzchzchz/goword v0.0.0-20170907005317-a9744cb52b03/go.mod h1:uFE9hX+zXEwvyUThZ4gDb9vkAwc5DoHUnRSEpH0VrOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
-github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
-github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
-github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e h1:vHRufSa2k8tfkcDdia1vJFa+oiBvvPxW94mg76PPAoA=
+github.com/coreos/license-bill-of-materials v0.0.0-20190913234955-13baff47494e/go.mod h1:4xMOusJ7xxc84WclVxKT8+lNfGYDwojOUC2OQNCwcj4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
-github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
+github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
+github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE=
+github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.3 h1:7MGZI1ibQDLasvAz8HuhvYk9eNJbJkCOXWsSjjMS+Zc=
+github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.4 h1:3Vw+rh13uq2JFNxgnMTGE1rnoieU9FmyE1gvnyylsYg=
+github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls=
+github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.9 h1:9z9cbFuZJ7AcvOHKIY+f6Aevb4vObNDkTEyoMfO7rAc=
+github.com/go-openapi/spec v0.19.9/go.mod h1:vqK/dIdLGCosfvYsQV3WfC7N3TiZSnGY2RZKoFK7X28=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.19.4 h1:eRvaqAhpL0IL6Trh5fDsGnGhiXndzHFuA05w6sXH6/g=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.7 h1:VRuXN2EnMSsZdauzdss6JBC29YotDqG59BZ+tdlIL1s=
+github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/goccy/go-yaml v1.8.1 h1:JuZRFlqLM5cWF6A+waL8AKVuCcqvKOuhJtUQI+L3ez0=
+github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU=
-github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -159,235 +195,328 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78 h1:U/zHjaVG/sECz5xhnh7kPH+Fv/maPbhZPcaTquo5sPg=
+github.com/gordonklaus/ineffassign v0.0.0-20200809085317-e36bfde3bb78/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535 h1:BGeD3v3lyKZy+ocGtprXiDXjIiXvZDfuyII7Lym7GbQ=
+github.com/gyuho/gocovmerge v0.0.0-20171205171859-50c7e6afd535/go.mod h1:xV7b0Cn2irnP1jU+mMYvqPAPuFPNjtgB+rvKu/dLIz4=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
-github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg=
-github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa h1:oDcxzjIf33MTX7b8Eu7eO3a/z8mlTT+blyEoVxBmUUg=
+github.com/hexfusion/schwag v0.0.0-20170606222847-b7d0fc9aadaa/go.mod h1:wSgrm+n3LvHOVxUJo2ha5ffLqRmt6+oGoD6J/suB66c=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow=
-github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f h1:Kc3s6QFyh9DLgInXpWKuG+8I7R7lXbnP7mcoOVIt6KY=
+github.com/mdempsky/unconvert v0.0.0-20200228143138-95ecdbfc0b5f/go.mod h1:AmCV4WB3cDMZqgPk+OUQKumliiQS4ZYsBt3AXekyuAU=
+github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM=
+github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
+github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg=
+github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735 h1:Qn41fatPrqv5qVpDFx+4ABF14LNj9jiNLm/BsrDb01U=
+github.com/mikefarah/yq/v3 v3.0.0-20201125113350-f42728eef735/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
-github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
-github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f h1:92ZQJRegaqnKjz9HY9an696Sw5EmAqRv0eie/U2IE6k=
+github.com/trustmaster/go-aspell v0.0.0-20200701131845-c2b1f55bec8f/go.mod h1:wxUiQ1klFJmwnM41kQI7IT2g8jjOKbtuL54LdjkxAI0=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
-go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
-go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a h1:Znv2XJyAf/fsJsFNt9toO8uyXwwHQ44wxqsvdSxipj4=
-go.etcd.io/raft/v3 v3.0.0-20221201111702-eaa6808e1f7a/go.mod h1:eMshmuwXLWZrjHXN8ZgYrOMQRSbHqi5M84DEZWhG+o4=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116 h1:QQiUXlqz+d96jyNG71NE+IGTgOK6Xlhdx+PzvfbLHlQ=
+go.etcd.io/protodoc v0.0.0-20180829002748-484ab544e116/go.mod h1:F9kog+iVAuvPJucb1dkYcDcbV0g4uyGEHllTP5NrXiw=
+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.3.0 h1:ew6uUIeJOo+qdUUv7LxFCUhtWmVv7ZV/Xuy4FAUsw2E=
+go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc=
-go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
-go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY=
-go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8=
-go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
-go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU=
-go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU=
-go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
-go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
+go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -397,133 +526,91 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.0.0-20211008194852-3b03d305991f h1:1scJEYZBaF48BaG6tYbtxmLcXqwYGSfGcMoStTqkkIw=
+golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -531,110 +618,69 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7 h1:8IVLkfbr2cLhv0a/vKq4UFUcJym8RmDoDboxCFWEjYE=
+golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U=
-golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -644,63 +690,24 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 h1:b9mVrqYfq3P4bCdaLg1qtBnPzUYgglsIdjZkL/fQVOE=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -709,43 +716,50 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
+gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE=
+gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 h1:kAREL6MPwpsk1/PQPFD3Eg7WAQR5mPTWZJaBiG5LDbY=
+mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
-sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/hack/benchmark/README.md b/hack/benchmark/README.md
index 6a09c530299..4ece14ca1da 100644
--- a/hack/benchmark/README.md
+++ b/hack/benchmark/README.md
@@ -10,5 +10,7 @@ Benchmark 3-member etcd cluster to get its read and write performance.
## Caveat
-1. Set environment variable `GOMAXPROCS` as the number of available cores to maximize CPU resources for both etcd member and bench process.
-2. Set the number of open files per process as 10000 for amounts of client connections for both etcd member and benchmark process.
+1. Set the environment variable `GOMAXPROCS` to the number of available cores to maximize CPU resources for both the
+   etcd member and the bench process.
+2. Raise the per-process open-file limit to 10000 so that both the etcd member and the benchmark process can handle
+   the large number of client connections.
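As a practical illustration of the two caveats above (a hedged sketch, not part of the README change itself), both settings can be applied in the shell that launches the etcd member and the benchmark; `nproc` is assumed to be available, as on most Linux systems:

```sh
# Illustrative only: apply both caveats in the shell before starting an
# etcd member or the benchmark process.
export GOMAXPROCS=$(nproc)   # let Go use all available cores
ulimit -n 10000              # allow up to 10000 open files for client connections
```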
diff --git a/hack/kubernetes-deploy/README.md b/hack/kubernetes-deploy/README.md
index bd34115d719..22df221d4aa 100644
--- a/hack/kubernetes-deploy/README.md
+++ b/hack/kubernetes-deploy/README.md
@@ -18,4 +18,4 @@ $ kubectl create -f vulcand.yml
TODO:
- create a replication controller like service that knows how to add and remove nodes from the cluster correctly
-- use kubernetes secrets API to configure TLS for etcd clients and peers
+- use the Kubernetes secrets API to configure TLS for etcd clients and peers
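As a rough sketch of what that TLS TODO could look like (the secret name, file paths, and wiring are assumptions, not something this deployment provides yet), the certificates could be stored in a Secret and passed to etcd through its TLS flags:

```sh
# Hypothetical: create a Secret holding the CA and server certificate/key.
kubectl create secret generic etcd-tls \
  --from-file=ca.crt=./certs/ca.crt \
  --from-file=server.crt=./certs/server.crt \
  --from-file=server.key=./certs/server.key

# The secret would then be mounted as a volume in etcd.yml and referenced via
# --cert-file, --key-file and --trusted-ca-file (plus the --peer-* variants
# for peer traffic).
```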
diff --git a/hack/kubernetes-deploy/etcd.yml b/hack/kubernetes-deploy/etcd.yml
index 84bf6be95ad..416f5729117 100644
--- a/hack/kubernetes-deploy/etcd.yml
+++ b/hack/kubernetes-deploy/etcd.yml
@@ -4,10 +4,10 @@ metadata:
name: etcd-client
spec:
ports:
- - name: etcd-client-port
- port: 2379
- protocol: TCP
- targetPort: 2379
+ - name: etcd-client-port
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
selector:
app: etcd
@@ -22,31 +22,31 @@ metadata:
name: etcd0
spec:
containers:
- - command:
- - /usr/local/bin/etcd
- - --name
- - etcd0
- - --initial-advertise-peer-urls
- - http://etcd0:2380
- - --listen-peer-urls
- - http://0.0.0.0:2380
- - --listen-client-urls
- - http://0.0.0.0:2379
- - --advertise-client-urls
- - http://etcd0:2379
- - --initial-cluster
- - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
- - --initial-cluster-state
- - new
- image: quay.io/coreos/etcd:latest
- name: etcd0
- ports:
- - containerPort: 2379
- name: client
- protocol: TCP
- - containerPort: 2380
- name: server
- protocol: TCP
+ - command:
+ - /usr/local/bin/etcd
+ - --name
+ - etcd0
+ - --initial-advertise-peer-urls
+ - http://etcd0:2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ - --listen-client-urls
+ - http://0.0.0.0:2379
+ - --advertise-client-urls
+ - http://etcd0:2379
+ - --initial-cluster
+ - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
+ - --initial-cluster-state
+ - new
+ image: quay.io/coreos/etcd:latest
+ name: etcd0
+ ports:
+ - containerPort: 2379
+ name: client
+ protocol: TCP
+ - containerPort: 2380
+          name: server
+ protocol: TCP
restartPolicy: Always
---
@@ -59,14 +59,14 @@ metadata:
name: etcd0
spec:
ports:
- - name: client
- port: 2379
- protocol: TCP
- targetPort: 2379
- - name: server
- port: 2380
- protocol: TCP
- targetPort: 2380
+ - name: client
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
+    - name: server
+ port: 2380
+ protocol: TCP
+ targetPort: 2380
selector:
etcd_node: etcd0
@@ -81,31 +81,31 @@ metadata:
name: etcd1
spec:
containers:
- - command:
- - /usr/local/bin/etcd
- - --name
- - etcd1
- - --initial-advertise-peer-urls
- - http://etcd1:2380
- - --listen-peer-urls
- - http://0.0.0.0:2380
- - --listen-client-urls
- - http://0.0.0.0:2379
- - --advertise-client-urls
- - http://etcd1:2379
- - --initial-cluster
- - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
- - --initial-cluster-state
- - new
- image: quay.io/coreos/etcd:latest
- name: etcd1
- ports:
- - containerPort: 2379
- name: client
- protocol: TCP
- - containerPort: 2380
- name: server
- protocol: TCP
+ - command:
+ - /usr/local/bin/etcd
+ - --name
+ - etcd1
+ - --initial-advertise-peer-urls
+ - http://etcd1:2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ - --listen-client-urls
+ - http://0.0.0.0:2379
+ - --advertise-client-urls
+ - http://etcd1:2379
+ - --initial-cluster
+ - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
+ - --initial-cluster-state
+ - new
+ image: quay.io/coreos/etcd:latest
+ name: etcd1
+ ports:
+ - containerPort: 2379
+ name: client
+ protocol: TCP
+ - containerPort: 2380
+          name: server
+ protocol: TCP
restartPolicy: Always
---
@@ -118,14 +118,14 @@ metadata:
name: etcd1
spec:
ports:
- - name: client
- port: 2379
- protocol: TCP
- targetPort: 2379
- - name: server
- port: 2380
- protocol: TCP
- targetPort: 2380
+ - name: client
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
+    - name: server
+ port: 2380
+ protocol: TCP
+ targetPort: 2380
selector:
etcd_node: etcd1
@@ -140,31 +140,31 @@ metadata:
name: etcd2
spec:
containers:
- - command:
- - /usr/local/bin/etcd
- - --name
- - etcd2
- - --initial-advertise-peer-urls
- - http://etcd2:2380
- - --listen-peer-urls
- - http://0.0.0.0:2380
- - --listen-client-urls
- - http://0.0.0.0:2379
- - --advertise-client-urls
- - http://etcd2:2379
- - --initial-cluster
- - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
- - --initial-cluster-state
- - new
- image: quay.io/coreos/etcd:latest
- name: etcd2
- ports:
- - containerPort: 2379
- name: client
- protocol: TCP
- - containerPort: 2380
- name: server
- protocol: TCP
+ - command:
+ - /usr/local/bin/etcd
+ - --name
+ - etcd2
+ - --initial-advertise-peer-urls
+ - http://etcd2:2380
+ - --listen-peer-urls
+ - http://0.0.0.0:2380
+ - --listen-client-urls
+ - http://0.0.0.0:2379
+ - --advertise-client-urls
+ - http://etcd2:2379
+ - --initial-cluster
+ - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380
+ - --initial-cluster-state
+ - new
+ image: quay.io/coreos/etcd:latest
+ name: etcd2
+ ports:
+ - containerPort: 2379
+ name: client
+ protocol: TCP
+ - containerPort: 2380
+          name: server
+ protocol: TCP
restartPolicy: Always
---
@@ -177,13 +177,13 @@ metadata:
name: etcd2
spec:
ports:
- - name: client
- port: 2379
- protocol: TCP
- targetPort: 2379
- - name: server
- port: 2380
- protocol: TCP
- targetPort: 2380
+ - name: client
+ port: 2379
+ protocol: TCP
+ targetPort: 2379
+    - name: server
+ port: 2380
+ protocol: TCP
+ targetPort: 2380
selector:
etcd_node: etcd2
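After applying the manifest, a quick way to confirm the three members found each other is to run `etcdctl` through the `etcd-client` service; this is a hedged example that assumes the image ships `etcdctl` (older images may require `ETCDCTL_API=3` to be set first):

```sh
# Assumes the etcd0 pod is running and the image contains etcdctl.
kubectl exec etcd0 -- etcdctl --endpoints=http://etcd-client:2379 member list
```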
diff --git a/hack/kubernetes-deploy/vulcand.yml b/hack/kubernetes-deploy/vulcand.yml
index bb61eec461b..fd45f41a19c 100644
--- a/hack/kubernetes-deploy/vulcand.yml
+++ b/hack/kubernetes-deploy/vulcand.yml
@@ -6,17 +6,17 @@ metadata:
name: vulcand
spec:
containers:
- - command:
- - /go/bin/vulcand
- - -apiInterface=0.0.0.0
- - --etcd=http://etcd-client:2379
- image: mailgun/vulcand:v0.8.0-beta.2
- name: vulcand
- ports:
- - containerPort: 8081
- name: api
- protocol: TCP
- - containerPort: 8082
- name: server
- protocol: TCP
+ - command:
+ - /go/bin/vulcand
+ - -apiInterface=0.0.0.0
+ - --etcd=http://etcd-client:2379
+ image: mailgun/vulcand:v0.8.0-beta.2
+ name: vulcand
+ ports:
+ - containerPort: 8081
+ name: api
+ protocol: TCP
+ - containerPort: 8082
+          name: server
+ protocol: TCP
restartPolicy: Always
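A similarly hedged smoke test for the vulcand pod is to port-forward its API port and query it with curl; the API route below is recalled from vulcand v0.8 and should be checked against the vulcand documentation:

```sh
# Forward vulcand's API port to the local machine and poke it.
kubectl port-forward pod/vulcand 8081:8081 &
sleep 2   # give port-forward a moment to establish
curl -s http://localhost:8081/v2/status
```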
diff --git a/hack/patch/README.md b/hack/patch/README.md
deleted file mode 100644
index 32323f17996..00000000000
--- a/hack/patch/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# ./hack/patch/cherrypick.sh
-
-Handles cherry-picks of PR(s) from etcd main to a stable etcd release branch automatically.
-
-## Setup
-
-Set the `UPSTREAM_REMOTE` and `FORK_REMOTE` environment variables.
-`UPSTREAM_REMOTE` should be set to git remote name of `github.com/etcd-io/etcd`,
-and `FORK_REMOTE` should be set to the git remote name of the forked etcd
-repo (`github.com/${github-username}/etcd`). Use `git remote -v` to
-look up the git remote names. If etcd has not been forked, create
-one on github.com and register it locally with `git remote add ...`.
-
-
-```
-export UPSTREAM_REMOTE=upstream
-export FORK_REMOTE=origin
-export GITHUB_USER=${github-username}
-```
-
-Next, install hub from https://github.com/github/hub
-
-## Usage
-
-To cherry pick PR 12345 onto release-3.2 and propose is as a PR, run:
-
-```sh
-./hack/patch/cherrypick.sh ${UPSTREAM_REMOTE}/release-3.2 12345
-```
-
-To cherry pick 12345 then 56789 and propose them togther as a single PR, run:
-
-```
-./hack/patch/cherrypick.sh ${UPSTREAM_REMOTE}/release-3.2 12345 56789
-```
-
-
diff --git a/hack/patch/cherrypick.sh b/hack/patch/cherrypick.sh
deleted file mode 100755
index ad143514f55..00000000000
--- a/hack/patch/cherrypick.sh
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env bash
-
-# Based on github.com/kubernetes/kubernetes/blob/v1.8.2/hack/cherry_pick_pull.sh
-
-# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
-# meta.) Assumes you care about pulls from remote "upstream" and
-# checks thems out to a branch named:
-#  automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-declare -r ETCD_ROOT="$(dirname "${BASH_SOURCE}")/../.."
-cd "${ETCD_ROOT}"
-
-declare -r STARTINGBRANCH=$(git symbolic-ref --short HEAD)
-declare -r REBASEMAGIC="${ETCD_ROOT}/.git/rebase-apply"
-DRY_RUN=${DRY_RUN:-""}
-REGENERATE_DOCS=${REGENERATE_DOCS:-""}
-UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
-FORK_REMOTE=${FORK_REMOTE:-origin}
-
-if [[ -z ${GITHUB_USER:-} ]]; then
- echo "Please export GITHUB_USER= (or GH organization, if that's where your fork lives)"
- exit 1
-fi
-
-if ! which hub > /dev/null; then
- echo "Can't find 'hub' tool in PATH, please install from https://github.com/github/hub"
- exit 1
-fi
-
-if [[ "$#" -lt 2 ]]; then
-  echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
- echo
- echo " Checks out and handles the cherry-pick of